diff options
Diffstat (limited to 'binutils-2.25/bfd/elfnn-aarch64.c')
-rw-r--r-- | binutils-2.25/bfd/elfnn-aarch64.c | 874 |
1 file changed, 755 insertions, 119 deletions
diff --git a/binutils-2.25/bfd/elfnn-aarch64.c b/binutils-2.25/bfd/elfnn-aarch64.c index 8f0e7169..19ad4d29 100644 --- a/binutils-2.25/bfd/elfnn-aarch64.c +++ b/binutils-2.25/bfd/elfnn-aarch64.c @@ -1,5 +1,5 @@ /* AArch64-specific support for NN-bit ELF. - Copyright 2009-2013 Free Software Foundation, Inc. + Copyright (C) 2009-2014 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of BFD, the Binary File Descriptor library. @@ -1297,7 +1297,11 @@ static reloc_howto_type elfNN_aarch64_howto_table[] = 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ +#if ARCH_SIZE == 64 + AARCH64_R_STR (TLS_DTPMOD64), /* name */ +#else AARCH64_R_STR (TLS_DTPMOD), /* name */ +#endif FALSE, /* partial_inplace */ 0, /* src_mask */ ALL_ONES, /* dst_mask */ @@ -1311,7 +1315,11 @@ static reloc_howto_type elfNN_aarch64_howto_table[] = 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ +#if ARCH_SIZE == 64 + AARCH64_R_STR (TLS_DTPREL64), /* name */ +#else AARCH64_R_STR (TLS_DTPREL), /* name */ +#endif FALSE, /* partial_inplace */ 0, /* src_mask */ ALL_ONES, /* dst_mask */ @@ -1325,7 +1333,11 @@ static reloc_howto_type elfNN_aarch64_howto_table[] = 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ +#if ARCH_SIZE == 64 + AARCH64_R_STR (TLS_TPREL64), /* name */ +#else AARCH64_R_STR (TLS_TPREL), /* name */ +#endif FALSE, /* partial_inplace */ 0, /* src_mask */ ALL_ONES, /* dst_mask */ @@ -1536,9 +1548,9 @@ elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, return NULL; } -#define TARGET_LITTLE_SYM bfd_elfNN_littleaarch64_vec +#define TARGET_LITTLE_SYM aarch64_elfNN_le_vec #define TARGET_LITTLE_NAME "elfNN-littleaarch64" -#define TARGET_BIG_SYM bfd_elfNN_bigaarch64_vec +#define TARGET_BIG_SYM aarch64_elfNN_be_vec #define TARGET_BIG_NAME "elfNN-bigaarch64" /* The linker script 
knows the section names for placement. @@ -1599,6 +1611,12 @@ static const uint32_t aarch64_long_branch_stub[] = 0x00000000, }; +static const uint32_t aarch64_erratum_835769_stub[] = +{ + 0x00000000, /* Placeholder for multiply accumulate. */ + 0x14000000, /* b <label> */ +}; + /* Section name for stubs is the associated section name plus this string. */ #define STUB_SUFFIX ".stub" @@ -1608,6 +1626,7 @@ enum elf_aarch64_stub_type aarch64_stub_none, aarch64_stub_adrp_branch, aarch64_stub_long_branch, + aarch64_stub_erratum_835769_veneer, }; struct elf_aarch64_stub_hash_entry @@ -1642,6 +1661,10 @@ struct elf_aarch64_stub_hash_entry stub name in the hash table has to be unique; this does not, so it can be friendlier. */ char *output_name; + + /* The instruction which caused this stub to be generated (only valid for + erratum 835769 workaround stubs at present). */ + uint32_t veneered_insn; }; /* Used to build a map of a section. This is required for mixed-endian @@ -1667,6 +1690,17 @@ _aarch64_elf_section_data; #define elf_aarch64_section_data(sec) \ ((_aarch64_elf_section_data *) elf_section_data (sec)) +/* A fix-descriptor for erratum 835769. */ +struct aarch64_erratum_835769_fix +{ + bfd *input_bfd; + asection *section; + bfd_vma offset; + uint32_t veneered_insn; + char *stub_name; + enum elf_aarch64_stub_type stub_type; +}; + /* The size of the thread control block which is defined to be two pointers. */ #define TCB_SIZE (ARCH_SIZE/8)*2 @@ -1787,6 +1821,15 @@ struct elf_aarch64_link_hash_table /* Nonzero to force PIC branch veneers. */ int pic_veneer; + /* Fix erratum 835769. */ + int fix_erratum_835769; + + /* A table of fix locations for erratum 835769. This holds erratum + fix locations between elfNN_aarch64_size_stubs() and + elfNN_aarch64_write_section(). */ + struct aarch64_erratum_835769_fix *aarch64_erratum_835769_fixes; + unsigned int num_aarch64_erratum_835769_fixes; + /* The number of bytes in the initial entry in the PLT. 
*/ bfd_size_type plt_header_size; @@ -2043,6 +2086,23 @@ elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info, _bfd_elf_link_hash_copy_indirect (info, dir, ind); } +/* Destroy an AArch64 elf linker hash table. */ + +static void +elfNN_aarch64_link_hash_table_free (bfd *obfd) +{ + struct elf_aarch64_link_hash_table *ret + = (struct elf_aarch64_link_hash_table *) obfd->link.hash; + + if (ret->loc_hash_table) + htab_delete (ret->loc_hash_table); + if (ret->loc_hash_memory) + objalloc_free ((struct objalloc *) ret->loc_hash_memory); + + bfd_hash_table_free (&ret->stub_hash_table); + _bfd_elf_link_hash_table_free (obfd); +} + /* Create an AArch64 elf linker hash table. */ static struct bfd_link_hash_table * @@ -2071,7 +2131,7 @@ elfNN_aarch64_link_hash_table_create (bfd *abfd) if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, sizeof (struct elf_aarch64_stub_hash_entry))) { - free (ret); + _bfd_elf_link_hash_table_free (abfd); return NULL; } @@ -2082,30 +2142,14 @@ elfNN_aarch64_link_hash_table_create (bfd *abfd) ret->loc_hash_memory = objalloc_create (); if (!ret->loc_hash_table || !ret->loc_hash_memory) { - free (ret); + elfNN_aarch64_link_hash_table_free (abfd); return NULL; } + ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free; return &ret->root.root; } -/* Free the derived linker hash table. 
*/ - -static void -elfNN_aarch64_hash_table_free (struct bfd_link_hash_table *hash) -{ - struct elf_aarch64_link_hash_table *ret - = (struct elf_aarch64_link_hash_table *) hash; - - if (ret->loc_hash_table) - htab_delete (ret->loc_hash_table); - if (ret->loc_hash_memory) - objalloc_free ((struct objalloc *) ret->loc_hash_memory); - - bfd_hash_table_free (&ret->stub_hash_table); - _bfd_elf_link_hash_table_free (hash); -} - static bfd_boolean aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section, bfd_vma offset, bfd_vma value) @@ -2330,6 +2374,9 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry, bfd *stub_bfd; bfd_byte *loc; bfd_vma sym_value; + bfd_vma veneered_insn_loc; + bfd_vma veneer_entry_loc; + bfd_signed_vma branch_offset = 0; unsigned int template_size; const uint32_t *template; unsigned int i; @@ -2370,6 +2417,10 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry, template = aarch64_long_branch_stub; template_size = sizeof (aarch64_long_branch_stub); break; + case aarch64_stub_erratum_835769_veneer: + template = aarch64_erratum_835769_stub; + template_size = sizeof (aarch64_erratum_835769_stub); + break; default: BFD_FAIL (); return FALSE; @@ -2412,6 +2463,23 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry, stub_entry->stub_offset + 16, sym_value + 12, 0); break; + + case aarch64_stub_erratum_835769_veneer: + veneered_insn_loc = stub_entry->target_section->output_section->vma + + stub_entry->target_section->output_offset + + stub_entry->target_value; + veneer_entry_loc = stub_entry->stub_sec->output_section->vma + + stub_entry->stub_sec->output_offset + + stub_entry->stub_offset; + branch_offset = veneered_insn_loc - veneer_entry_loc; + branch_offset >>= 2; + branch_offset &= 0x3ffffff; + bfd_putl32 (stub_entry->veneered_insn, + stub_sec->contents + stub_entry->stub_offset); + bfd_putl32 (template[1] | branch_offset, + stub_sec->contents + stub_entry->stub_offset + 4); + break; + default: break; } @@ 
-2440,6 +2508,9 @@ aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, case aarch64_stub_long_branch: size = sizeof (aarch64_long_branch_stub); break; + case aarch64_stub_erratum_835769_veneer: + size = sizeof (aarch64_erratum_835769_stub); + break; default: BFD_FAIL (); return FALSE; @@ -2475,7 +2546,7 @@ elfNN_aarch64_setup_section_lists (bfd *output_bfd, /* Count the number of input BFDs and find the top input section id. */ for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; - input_bfd != NULL; input_bfd = input_bfd->link_next) + input_bfd != NULL; input_bfd = input_bfd->link.next) { bfd_count += 1; for (section = input_bfd->sections; @@ -2628,6 +2699,388 @@ group_sections (struct elf_aarch64_link_hash_table *htab, #undef PREV_SEC +#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1)) + +#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5) +#define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5) +#define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5) +#define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5) +#define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5) +#define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5) + +#define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000) +#define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1) +#define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3) +#define AARCH64_ZR 0x1f + +/* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for + LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. 
*/ + +#define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1) +#define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000) +#define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000) +#define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000) +#define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000) +#define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000) +#define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000) +#define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000) +#define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000) +#define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400) +#define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800) +#define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00) +#define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800) +#define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000) +#define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000) +#define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000) +#define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000) +#define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000) + +/* Classify an INSN if it is indeed a load/store. Return TRUE if INSN + is a load/store along with the Rt and Rtn. Return FALSE if not a + load/store. */ + +static bfd_boolean +aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn, + bfd_boolean *pair, bfd_boolean *load) +{ + uint32_t opcode; + unsigned int r; + uint32_t opc = 0; + uint32_t v = 0; + uint32_t opc_v = 0; + + /* Bail out quickly if INSN doesn't fall into the the load-store + encoding space. 
*/ + if (!AARCH64_LDST (insn)) + return FALSE; + + *pair = FALSE; + *load = FALSE; + if (AARCH64_LDST_EX (insn)) + { + *rt = AARCH64_RT (insn); + *rtn = *rt; + if (AARCH64_BIT (insn, 21) == 1) + { + *pair = TRUE; + *rtn = AARCH64_RT2 (insn); + } + *load = AARCH64_LD (insn); + return TRUE; + } + else if (AARCH64_LDST_NAP (insn) + || AARCH64_LDSTP_PI (insn) + || AARCH64_LDSTP_O (insn) + || AARCH64_LDSTP_PRE (insn)) + { + *pair = TRUE; + *rt = AARCH64_RT (insn); + *rtn = AARCH64_RT2 (insn); + *load = AARCH64_LD (insn); + return TRUE; + } + else if (AARCH64_LDST_PCREL (insn) + || AARCH64_LDST_UI (insn) + || AARCH64_LDST_PIIMM (insn) + || AARCH64_LDST_U (insn) + || AARCH64_LDST_PREIMM (insn) + || AARCH64_LDST_RO (insn) + || AARCH64_LDST_UIMM (insn)) + { + *rt = AARCH64_RT (insn); + *rtn = *rt; + if (AARCH64_LDST_PCREL (insn)) + *load = TRUE; + opc = AARCH64_BITS (insn, 22, 2); + v = AARCH64_BIT (insn, 26); + opc_v = opc | (v << 2); + *load = (opc_v == 1 || opc_v == 2 || opc_v == 3 + || opc_v == 5 || opc_v == 7); + return TRUE; + } + else if (AARCH64_LDST_SIMD_M (insn) + || AARCH64_LDST_SIMD_M_PI (insn)) + { + *rt = AARCH64_RT (insn); + *load = AARCH64_BIT (insn, 22); + opcode = (insn >> 12) & 0xf; + switch (opcode) + { + case 0: + case 2: + *rtn = *rt + 3; + break; + + case 4: + case 6: + *rtn = *rt + 2; + break; + + case 7: + *rtn = *rt; + break; + + case 8: + case 10: + *rtn = *rt + 1; + break; + + default: + return FALSE; + } + return TRUE; + } + else if (AARCH64_LDST_SIMD_S (insn) + || AARCH64_LDST_SIMD_S_PI (insn)) + { + *rt = AARCH64_RT (insn); + r = (insn >> 21) & 1; + *load = AARCH64_BIT (insn, 22); + opcode = (insn >> 13) & 0x7; + switch (opcode) + { + case 0: + case 2: + case 4: + *rtn = *rt + r; + break; + + case 1: + case 3: + case 5: + *rtn = *rt + (r == 0 ? 2 : 3); + break; + + case 6: + *rtn = *rt + r; + break; + + case 7: + *rtn = *rt + (r == 0 ? 
2 : 3); + break; + + default: + return FALSE; + } + return TRUE; + } + + return FALSE; +} + +/* Return TRUE if INSN is multiply-accumulate. */ + +static bfd_boolean +aarch64_mlxl_p (uint32_t insn) +{ + uint32_t op31 = AARCH64_OP31 (insn); + + if (AARCH64_MAC (insn) + && (op31 == 0 || op31 == 1 || op31 == 5) + /* Exclude MUL instructions which are encoded as a multiple accumulate + with RA = XZR. */ + && AARCH64_RA (insn) != AARCH64_ZR) + return TRUE; + + return FALSE; +} + +/* Some early revisions of the Cortex-A53 have an erratum (835769) whereby + it is possible for a 64-bit multiply-accumulate instruction to generate an + incorrect result. The details are quite complex and hard to + determine statically, since branches in the code may exist in some + circumstances, but all cases end with a memory (load, store, or + prefetch) instruction followed immediately by the multiply-accumulate + operation. We employ a linker patching technique, by moving the potentially + affected multiply-accumulate instruction into a patch region and replacing + the original instruction with a branch to the patch. This function checks + if INSN_1 is the memory operation followed by a multiply-accumulate + operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE + if INSN_1 and INSN_2 are safe. */ + +static bfd_boolean +aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2) +{ + uint32_t rt; + uint32_t rtn; + uint32_t rn; + uint32_t rm; + uint32_t ra; + bfd_boolean pair; + bfd_boolean load; + + if (aarch64_mlxl_p (insn_2) + && aarch64_mem_op_p (insn_1, &rt, &rtn, &pair, &load)) + { + /* Any SIMD memory op is independent of the subsequent MLA + by definition of the erratum. */ + if (AARCH64_BIT (insn_1, 26)) + return TRUE; + + /* If not SIMD, check for integer memory ops and MLA relationship. 
*/ + rn = AARCH64_RN (insn_2); + ra = AARCH64_RA (insn_2); + rm = AARCH64_RM (insn_2); + + /* If this is a load and there's a true(RAW) dependency, we are safe + and this is not an erratum sequence. */ + if (load && + (rt == rn || rt == rm || rt == ra + || (pair && (rtn == rn || rtn == rm || rtn == ra)))) + return FALSE; + + /* We conservatively put out stubs for all other cases (including + writebacks). */ + return TRUE; + } + + return FALSE; +} + +static bfd_boolean +erratum_835769_scan (bfd *input_bfd, + struct bfd_link_info *info, + struct aarch64_erratum_835769_fix **fixes_p, + unsigned int *num_fixes_p, + unsigned int *fix_table_size_p) +{ + asection *section; + struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); + struct aarch64_erratum_835769_fix *fixes = *fixes_p; + unsigned int num_fixes = *num_fixes_p; + unsigned int fix_table_size = *fix_table_size_p; + + if (htab == NULL) + return FALSE; + + for (section = input_bfd->sections; + section != NULL; + section = section->next) + { + bfd_byte *contents = NULL; + struct _aarch64_elf_section_data *sec_data; + unsigned int span; + + if (elf_section_type (section) != SHT_PROGBITS + || (elf_section_flags (section) & SHF_EXECINSTR) == 0 + || (section->flags & SEC_EXCLUDE) != 0 + || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) + || (section->output_section == bfd_abs_section_ptr)) + continue; + + if (elf_section_data (section)->this_hdr.contents != NULL) + contents = elf_section_data (section)->this_hdr.contents; + else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) + return TRUE; + + sec_data = elf_aarch64_section_data (section); + for (span = 0; span < sec_data->mapcount; span++) + { + unsigned int span_start = sec_data->map[span].vma; + unsigned int span_end = ((span == sec_data->mapcount - 1) + ? 
sec_data->map[0].vma + section->size + : sec_data->map[span + 1].vma); + unsigned int i; + char span_type = sec_data->map[span].type; + + if (span_type == 'd') + continue; + + for (i = span_start; i + 4 < span_end; i += 4) + { + uint32_t insn_1 = bfd_getl32 (contents + i); + uint32_t insn_2 = bfd_getl32 (contents + i + 4); + + if (aarch64_erratum_sequence (insn_1, insn_2)) + { + char *stub_name = NULL; + stub_name = (char *) bfd_malloc + (strlen ("__erratum_835769_veneer_") + 16); + if (stub_name != NULL) + sprintf + (stub_name,"__erratum_835769_veneer_%d", num_fixes); + else + return TRUE; + + if (num_fixes == fix_table_size) + { + fix_table_size *= 2; + fixes = + (struct aarch64_erratum_835769_fix *) + bfd_realloc (fixes, + sizeof (struct aarch64_erratum_835769_fix) + * fix_table_size); + if (fixes == NULL) + return TRUE; + } + + fixes[num_fixes].input_bfd = input_bfd; + fixes[num_fixes].section = section; + fixes[num_fixes].offset = i + 4; + fixes[num_fixes].veneered_insn = insn_2; + fixes[num_fixes].stub_name = stub_name; + fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer; + num_fixes++; + } + } + } + if (elf_section_data (section)->this_hdr.contents == NULL) + free (contents); + } + + *fixes_p = fixes; + *num_fixes_p = num_fixes; + *fix_table_size_p = fix_table_size; + return FALSE; +} + +/* Find or create a stub section. Returns a pointer to the stub section, and + the section to which the stub section will be attached (in *LINK_SEC_P). + LINK_SEC_P may be NULL. 
*/ + +static asection * +elf_aarch64_create_or_find_stub_sec (asection **link_sec_p, asection *section, + struct elf_aarch64_link_hash_table *htab) +{ + asection *link_sec; + asection *stub_sec; + + link_sec = htab->stub_group[section->id].link_sec; + BFD_ASSERT (link_sec != NULL); + stub_sec = htab->stub_group[section->id].stub_sec; + + if (stub_sec == NULL) + { + stub_sec = htab->stub_group[link_sec->id].stub_sec; + if (stub_sec == NULL) + { + size_t namelen; + bfd_size_type len; + char *s_name; + + namelen = strlen (link_sec->name); + len = namelen + sizeof (STUB_SUFFIX); + s_name = (char *) bfd_alloc (htab->stub_bfd, len); + if (s_name == NULL) + return NULL; + + memcpy (s_name, link_sec->name, namelen); + memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); + stub_sec = (*htab->add_stub_section) (s_name, link_sec); + + if (stub_sec == NULL) + return NULL; + htab->stub_group[link_sec->id].stub_sec = stub_sec; + } + htab->stub_group[section->id].stub_sec = stub_sec; + } + + if (link_sec_p) + *link_sec_p = link_sec; + + return stub_sec; +} + /* Determine and set the size of the stub section for a final link. The basic idea here is to examine all the relocations looking for @@ -2647,6 +3100,21 @@ elfNN_aarch64_size_stubs (bfd *output_bfd, bfd_boolean stubs_always_before_branch; bfd_boolean stub_changed = 0; struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); + struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL; + unsigned int num_erratum_835769_fixes = 0; + unsigned int erratum_835769_fix_table_size = 10; + unsigned int i; + + if (htab->fix_erratum_835769) + { + erratum_835769_fixes + = (struct aarch64_erratum_835769_fix *) + bfd_zmalloc + (sizeof (struct aarch64_erratum_835769_fix) * + erratum_835769_fix_table_size); + if (erratum_835769_fixes == NULL) + goto error_ret_free_local; + } /* Propagate mach to stub bfd, because it may not have been finalized when we created stub_bfd. 
*/ @@ -2677,9 +3145,11 @@ elfNN_aarch64_size_stubs (bfd *output_bfd, bfd *input_bfd; unsigned int bfd_indx; asection *stub_sec; + unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes; + num_erratum_835769_fixes = 0; for (input_bfd = info->input_bfds, bfd_indx = 0; - input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++) + input_bfd != NULL; input_bfd = input_bfd->link.next, bfd_indx++) { Elf_Internal_Shdr *symtab_hdr; asection *section; @@ -2930,8 +3400,20 @@ elfNN_aarch64_size_stubs (bfd *output_bfd, if (elf_section_data (section)->relocs == NULL) free (internal_relocs); } + + if (htab->fix_erratum_835769) + { + /* Scan for sequences which might trigger erratum 835769. */ + if (erratum_835769_scan (input_bfd, info, &erratum_835769_fixes, + &num_erratum_835769_fixes, + &erratum_835769_fix_table_size) != 0) + goto error_ret_free_local; + } } + if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes) + stub_changed = TRUE; + if (!stub_changed) break; @@ -2939,15 +3421,76 @@ elfNN_aarch64_size_stubs (bfd *output_bfd, stub sections. */ for (stub_sec = htab->stub_bfd->sections; stub_sec != NULL; stub_sec = stub_sec->next) - stub_sec->size = 0; + { + /* Ignore non-stub sections. */ + if (!strstr (stub_sec->name, STUB_SUFFIX)) + continue; + stub_sec->size = 0; + } bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab); + /* Add erratum 835769 veneers to stub section sizes too. */ + if (htab->fix_erratum_835769) + for (i = 0; i < num_erratum_835769_fixes; i++) + { + stub_sec = elf_aarch64_create_or_find_stub_sec (NULL, + erratum_835769_fixes[i].section, htab); + + if (stub_sec == NULL) + goto error_ret_free_local; + + stub_sec->size += 8; + } + /* Ask the linker to do its stuff. */ (*htab->layout_sections_again) (); stub_changed = FALSE; } + /* Add stubs for erratum 835769 fixes now. 
*/ + if (htab->fix_erratum_835769) + { + for (i = 0; i < num_erratum_835769_fixes; i++) + { + struct elf_aarch64_stub_hash_entry *stub_entry; + char *stub_name = erratum_835769_fixes[i].stub_name; + asection *section = erratum_835769_fixes[i].section; + unsigned int section_id = erratum_835769_fixes[i].section->id; + asection *link_sec = htab->stub_group[section_id].link_sec; + asection *stub_sec = htab->stub_group[section_id].stub_sec; + + stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, + stub_name, TRUE, FALSE); + if (stub_entry == NULL) + { + (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), + section->owner, + stub_name); + return FALSE; + } + + stub_entry->stub_sec = stub_sec; + stub_entry->stub_offset = 0; + stub_entry->id_sec = link_sec; + stub_entry->stub_type = erratum_835769_fixes[i].stub_type; + stub_entry->target_section = section; + stub_entry->target_value = erratum_835769_fixes[i].offset; + stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn; + stub_entry->output_name = erratum_835769_fixes[i].stub_name; + } + + /* Stash the erratum 835769 fix array for use later in + elfNN_aarch64_write_section(). 
*/ + htab->aarch64_erratum_835769_fixes = erratum_835769_fixes; + htab->num_aarch64_erratum_835769_fixes = num_erratum_835769_fixes; + } + else + { + htab->aarch64_erratum_835769_fixes = NULL; + htab->num_aarch64_erratum_835769_fixes = 0; + } + return TRUE; error_ret_free_local: @@ -3040,7 +3583,7 @@ bfd_elfNN_aarch64_init_maps (bfd *abfd) return; if ((abfd->flags & DYNAMIC) != 0) - return; + return; hdr = &elf_symtab_hdr (abfd); localsyms = hdr->sh_info; @@ -3078,12 +3621,14 @@ void bfd_elfNN_aarch64_set_options (struct bfd *output_bfd, struct bfd_link_info *link_info, int no_enum_warn, - int no_wchar_warn, int pic_veneer) + int no_wchar_warn, int pic_veneer, + int fix_erratum_835769) { struct elf_aarch64_link_hash_table *globals; globals = elf_aarch64_hash_table (link_info); globals->pic_veneer = pic_veneer; + globals->fix_erratum_835769 = fix_erratum_835769; BFD_ASSERT (is_aarch64_elf (output_bfd)); elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; @@ -3289,8 +3834,7 @@ tpoff_base (struct bfd_link_info *info) struct elf_link_hash_table *htab = elf_hash_table (info); /* If tls_sec is NULL, we should have signalled an error already. */ - if (htab->tls_sec == NULL) - return 0; + BFD_ASSERT (htab->tls_sec != NULL); bfd_vma base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power); @@ -3394,6 +3938,89 @@ symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, return value; } +/* Data for make_branch_to_erratum_835769_stub(). */ + +struct erratum_835769_branch_to_stub_data +{ + asection *output_section; + bfd_byte *contents; +}; + +/* Helper to insert branches to erratum 835769 stubs in the right + places for a particular section. 
*/ + +static bfd_boolean +make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry, + void *in_arg) +{ + struct elf_aarch64_stub_hash_entry *stub_entry; + struct erratum_835769_branch_to_stub_data *data; + bfd_byte *contents; + unsigned long branch_insn = 0; + bfd_vma veneered_insn_loc, veneer_entry_loc; + bfd_signed_vma branch_offset; + unsigned int target; + bfd *abfd; + + stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry; + data = (struct erratum_835769_branch_to_stub_data *) in_arg; + + if (stub_entry->target_section != data->output_section + || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer) + return TRUE; + + contents = data->contents; + veneered_insn_loc = stub_entry->target_section->output_section->vma + + stub_entry->target_section->output_offset + + stub_entry->target_value; + veneer_entry_loc = stub_entry->stub_sec->output_section->vma + + stub_entry->stub_sec->output_offset + + stub_entry->stub_offset; + branch_offset = veneer_entry_loc - veneered_insn_loc; + + abfd = stub_entry->target_section->owner; + if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc)) + (*_bfd_error_handler) + (_("%B: error: Erratum 835769 stub out " + "of range (input file too large)"), abfd); + + target = stub_entry->target_value; + branch_insn = 0x14000000; + branch_offset >>= 2; + branch_offset &= 0x3ffffff; + branch_insn |= branch_offset; + bfd_putl32 (branch_insn, &contents[target]); + + return TRUE; +} + +static bfd_boolean +elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED, + struct bfd_link_info *link_info, + asection *sec, + bfd_byte *contents) + +{ + struct elf_aarch64_link_hash_table *globals = + elf_aarch64_hash_table (link_info); + + if (globals == NULL) + return FALSE; + + /* Fix code to point to erratum 835769 stubs. 
*/ + if (globals->fix_erratum_835769) + { + struct erratum_835769_branch_to_stub_data data; + + data.output_section = sec; + data.contents = contents; + bfd_hash_traverse (&globals->stub_hash_table, + make_branch_to_erratum_835769_stub, &data); + } + + return FALSE; +} + /* Perform a relocation as part of a final link. */ static bfd_reloc_status_type elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, @@ -3577,7 +4204,8 @@ elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, if (globals->root.splt != NULL) { - plt_index = h->plt.offset / globals->plt_entry_size - 1; + plt_index = ((h->plt.offset - globals->plt_header_size) / + globals->plt_entry_size); off = (plt_index + 3) * GOT_ENTRY_SIZE; base_got = globals->root.sgotplt; } @@ -3831,7 +4459,7 @@ elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, value = (symbol_got_offset (input_bfd, h, r_symndx) + globals->root.sgot->output_section->vma - + globals->root.sgot->output_section->output_offset); + + globals->root.sgot->output_offset); value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, 0, weak_undef_p); @@ -3860,10 +4488,9 @@ elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, case BFD_RELOC_AARCH64_TLSDESC_LDR: if (globals->root.sgot == NULL) return bfd_reloc_notsupported; - value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx) + globals->root.sgotplt->output_section->vma - + globals->root.sgotplt->output_section->output_offset + + globals->root.sgotplt->output_offset + globals->sgotplt_jump_table_size); value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, @@ -3926,7 +4553,6 @@ elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals, or adrp x0, :tlsdesc:var => adrp x0, :gottprel:var */ - insn = bfd_getl32 (contents + rel->r_offset); return bfd_reloc_continue; } @@ -3945,7 +4571,7 @@ elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals, ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var] */ 
insn = bfd_getl32 (contents + rel->r_offset); - insn &= 0xfffffff0; + insn &= 0xffffffe0; bfd_putl32 (insn, contents + rel->r_offset); return bfd_reloc_continue; } @@ -4132,12 +4758,12 @@ elfNN_aarch64_relocate_section (bfd *output_bfd, } else { - bfd_boolean warned; + bfd_boolean warned, ignored; RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, r_symndx, symtab_hdr, sym_hashes, h, sec, relocation, - unresolved_reloc, warned); + unresolved_reloc, warned, ignored); sym_type = h->type; } @@ -4147,15 +4773,7 @@ elfNN_aarch64_relocate_section (bfd *output_bfd, rel, 1, relend, howto, 0, contents); if (info->relocatable) - { - /* This is a relocatable link. We don't have to change - anything, unless the reloc is against a section symbol, - in which case we have to adjust according to where the - section symbol winds up in the output section. */ - if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION) - rel->r_addend += sec->output_offset; - continue; - } + continue; if (h != NULL) name = h->root.root.string; @@ -4525,31 +5143,6 @@ elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags) return TRUE; } -/* Copy backend specific data from one object module to another. */ - -static bfd_boolean -elfNN_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd) -{ - flagword in_flags; - - if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd)) - return TRUE; - - in_flags = elf_elfheader (ibfd)->e_flags; - - elf_elfheader (obfd)->e_flags = in_flags; - elf_flags_init (obfd) = TRUE; - - /* Also copy the EI_OSABI field. */ - elf_elfheader (obfd)->e_ident[EI_OSABI] = - elf_elfheader (ibfd)->e_ident[EI_OSABI]; - - /* Copy object attributes. */ - _bfd_elf_copy_obj_attributes (ibfd, obfd); - - return TRUE; -} - /* Merge backend specific data from an object file to the output object file when linking. 
*/ @@ -4788,16 +5381,6 @@ elfNN_aarch64_gc_sweep_hook (bfd *abfd, } break; - case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: - case BFD_RELOC_AARCH64_ADR_HI21_PCREL: - case BFD_RELOC_AARCH64_ADR_LO21_PCREL: - if (h != NULL && info->executable) - { - if (h->plt.refcount > 0) - h->plt.refcount -= 1; - } - break; - case BFD_RELOC_AARCH64_CALL26: case BFD_RELOC_AARCH64_JUMP26: /* If this is a local symbol then we resolve it @@ -4809,6 +5392,13 @@ elfNN_aarch64_gc_sweep_hook (bfd *abfd, h->plt.refcount -= 1; break; + case BFD_RELOC_AARCH64_MOVW_G0_NC: + case BFD_RELOC_AARCH64_MOVW_G1_NC: + case BFD_RELOC_AARCH64_MOVW_G2_NC: + case BFD_RELOC_AARCH64_MOVW_G3: + case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: + case BFD_RELOC_AARCH64_ADR_HI21_PCREL: + case BFD_RELOC_AARCH64_ADR_LO21_PCREL: case BFD_RELOC_AARCH64_NN: if (h != NULL && info->executable) { @@ -5305,6 +5895,22 @@ elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info, break; } + case BFD_RELOC_AARCH64_MOVW_G0_NC: + case BFD_RELOC_AARCH64_MOVW_G1_NC: + case BFD_RELOC_AARCH64_MOVW_G2_NC: + case BFD_RELOC_AARCH64_MOVW_G3: + if (info->shared) + { + int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; + (*_bfd_error_handler) + (_("%B: relocation %s against `%s' can not be used when making " + "a shared object; recompile with -fPIC"), + abfd, elfNN_aarch64_howto_table[howto_index].name, + (h) ? 
h->root.root.string : "a local symbol"); + bfd_set_error (bfd_error_bad_value); + return FALSE; + } + case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: case BFD_RELOC_AARCH64_ADR_HI21_PCREL: case BFD_RELOC_AARCH64_ADR_LO21_PCREL: @@ -5362,8 +5968,8 @@ elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED, static bfd_boolean aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED, - asection *section, asymbol **symbols, + asection *section, bfd_vma offset, const char **filename_ptr, const char **functionname_ptr) @@ -5422,32 +6028,33 @@ aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED, static bfd_boolean elfNN_aarch64_find_nearest_line (bfd *abfd, - asection *section, asymbol **symbols, + asection *section, bfd_vma offset, const char **filename_ptr, const char **functionname_ptr, - unsigned int *line_ptr) + unsigned int *line_ptr, + unsigned int *discriminator_ptr) { bfd_boolean found = FALSE; - /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64 - toolchain uses it. */ - - if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections, - section, symbols, offset, + if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset, filename_ptr, functionname_ptr, - line_ptr, NULL, 0, + line_ptr, discriminator_ptr, + dwarf_debug_sections, 0, &elf_tdata (abfd)->dwarf2_find_line_info)) { if (!*functionname_ptr) - aarch64_elf_find_function (abfd, section, symbols, offset, + aarch64_elf_find_function (abfd, symbols, section, offset, *filename_ptr ? NULL : filename_ptr, functionname_ptr); return TRUE; } + /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64 + toolchain uses DWARF1. 
*/ + if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, &found, filename_ptr, functionname_ptr, line_ptr, @@ -5460,7 +6067,7 @@ elfNN_aarch64_find_nearest_line (bfd *abfd, if (symbols == NULL) return FALSE; - if (!aarch64_elf_find_function (abfd, section, symbols, offset, + if (!aarch64_elf_find_function (abfd, symbols, section, offset, filename_ptr, functionname_ptr)) return FALSE; @@ -5491,7 +6098,7 @@ elfNN_aarch64_post_process_headers (bfd *abfd, i_ehdrp = elf_elfheader (abfd); i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION; - _bfd_elf_set_osabi (abfd, link_info); + _bfd_elf_post_process_headers (abfd, link_info); } static enum elf_reloc_type_class @@ -5512,17 +6119,6 @@ elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSE } } -/* Set the right machine number for an AArch64 ELF file. */ - -static bfd_boolean -elfNN_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr) -{ - if (hdr->sh_type == SHT_NOTE) - *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS; - - return TRUE; -} - /* Handle an AArch64 specific section when reading an object file. This is called when bfd_section_from_shdr finds a section with an unknown type. */ @@ -5767,6 +6363,13 @@ aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg) if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16)) return FALSE; break; + case aarch64_stub_erratum_835769_veneer: + if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr, + sizeof (aarch64_erratum_835769_stub))) + return FALSE; + if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) + return FALSE; + break; default: BFD_FAIL (); } @@ -6315,7 +6918,7 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED, /* Set up .got offsets for local syms, and space for local dynamic relocs. 
*/ - for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) { struct elf_aarch64_local_symbol *locals = NULL; Elf_Internal_Shdr *symtab_hdr; @@ -6462,6 +7065,16 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED, } } + /* Init mapping symbols information to use later to distinguish between + code and data while scanning for erratum 835769. */ + if (htab->fix_erratum_835769) + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) + { + if (!is_aarch64_elf (ibfd)) + continue; + bfd_elfNN_aarch64_init_maps (ibfd); + } + /* We now have determined the sizes of the various dynamic sections. Allocate memory for them. */ relocs = FALSE; @@ -6639,7 +7252,7 @@ elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h, plt_entry = plt->contents + h->plt.offset; plt_entry_address = plt->output_section->vma - + plt->output_section->output_offset + h->plt.offset; + + plt->output_offset + h->plt.offset; gotplt_entry_address = gotplt->output_section->vma + gotplt->output_offset + got_offset; @@ -6811,7 +7424,34 @@ elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd, + htab->root.sgot->output_offset + (h->got.offset & ~(bfd_vma) 1)); - if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h)) + if (h->def_regular + && h->type == STT_GNU_IFUNC) + { + if (info->shared) + { + /* Generate R_AARCH64_GLOB_DAT. */ + goto do_glob_dat; + } + else + { + asection *plt; + + if (!h->pointer_equality_needed) + abort (); + + /* For non-shared object, we can't use .got.plt, which + contains the real function address if we need pointer + equality. We load the GOT entry with the PLT entry. */ + plt = htab->root.splt ? 
htab->root.splt : htab->root.iplt; + bfd_put_NN (output_bfd, (plt->output_section->vma + + plt->output_offset + + h->plt.offset), + htab->root.sgot->contents + + (h->got.offset & ~(bfd_vma) 1)); + return TRUE; + } + } + else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h)) { if (!h->def_regular) return FALSE; @@ -6824,6 +7464,7 @@ elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd, } else { +do_glob_dat: BFD_ASSERT ((h->got.offset & 1) == 0); bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgot->contents + h->got.offset); @@ -6918,7 +7559,7 @@ elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED, + GOT_ENTRY_SIZE * 2); plt_base = htab->root.splt->output_section->vma + - htab->root.splt->output_section->output_offset; + htab->root.splt->output_offset; /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ @@ -6978,7 +7619,7 @@ elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd, break; case DT_PLTRELSZ: - s = htab->root.srelplt->output_section; + s = htab->root.srelplt; dyn.d_un.d_val = s->size; break; @@ -6992,7 +7633,7 @@ elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd, about changing the DT_RELA entry. 
*/ if (htab->root.srelplt != NULL) { - s = htab->root.srelplt->output_section; + s = htab->root.srelplt; dyn.d_un.d_val -= s->size; } break; @@ -7185,9 +7826,6 @@ const struct elf_size_info elfNN_aarch64_size_info = #define bfd_elfNN_close_and_cleanup \ elfNN_aarch64_close_and_cleanup -#define bfd_elfNN_bfd_copy_private_bfd_data \ - elfNN_aarch64_copy_private_bfd_data - #define bfd_elfNN_bfd_free_cached_info \ elfNN_aarch64_bfd_free_cached_info @@ -7197,9 +7835,6 @@ const struct elf_size_info elfNN_aarch64_size_info = #define bfd_elfNN_bfd_link_hash_table_create \ elfNN_aarch64_link_hash_table_create -#define bfd_elfNN_bfd_link_hash_table_free \ - elfNN_aarch64_hash_table_free - #define bfd_elfNN_bfd_merge_private_bfd_data \ elfNN_aarch64_merge_private_bfd_data @@ -7274,9 +7909,6 @@ const struct elf_size_info elfNN_aarch64_size_info = #define elf_backend_reloc_type_class \ elfNN_aarch64_reloc_type_class -#define elf_backend_section_flags \ - elfNN_aarch64_section_flags - #define elf_backend_section_from_shdr \ elfNN_aarch64_section_from_shdr @@ -7286,6 +7918,9 @@ const struct elf_size_info elfNN_aarch64_size_info = #define elf_backend_size_info \ elfNN_aarch64_size_info +#define elf_backend_write_section \ + elfNN_aarch64_write_section + #define elf_backend_can_refcount 1 #define elf_backend_can_gc_sections 1 #define elf_backend_plt_readonly 1 @@ -7294,6 +7929,7 @@ const struct elf_size_info elfNN_aarch64_size_info = #define elf_backend_may_use_rel_p 0 #define elf_backend_may_use_rela_p 1 #define elf_backend_default_use_rela_p 1 +#define elf_backend_rela_normal 1 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3) #define elf_backend_default_execstack 0 |