From 5e8c1cf25beccac1d22d10dc866912394f42771b Mon Sep 17 00:00:00 2001 From: Andrew Hsieh Date: Tue, 9 Dec 2014 17:57:18 +0800 Subject: [2.25] sync to a30720e3e633f275250e26f85ccae5dbdddfb6c6 local patches will be re-applied later commit a30720e3e633f275250e26f85ccae5dbdddfb6c6 Author: Alan Modra Date: Wed Nov 19 10:30:16 2014 +1030 daily update Change-Id: Ieb2a3f4dd2ecb289ac5305ff08d428b2847494ab --- binutils-2.25/gas/config/tc-arm.c | 1071 +++++++++++++++++++++++++------------ 1 file changed, 729 insertions(+), 342 deletions(-) (limited to 'binutils-2.25/gas/config/tc-arm.c') diff --git a/binutils-2.25/gas/config/tc-arm.c b/binutils-2.25/gas/config/tc-arm.c index d170f43d..5077f87e 100644 --- a/binutils-2.25/gas/config/tc-arm.c +++ b/binutils-2.25/gas/config/tc-arm.c @@ -1,5 +1,5 @@ /* tc-arm.c -- Assemble for the ARM - Copyright 1994-2013 Free Software Foundation, Inc. + Copyright (C) 1994-2014 Free Software Foundation, Inc. Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) Modified by David Taylor (dtaylor@armltd.co.uk) Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com) @@ -137,6 +137,8 @@ static int fix_v4bx = FALSE; /* Warn on using deprecated features. */ static int warn_on_deprecated = TRUE; +/* Understand CodeComposer Studio assembly syntax. */ +bfd_boolean codecomposer_syntax = FALSE; /* Variables that we set while parsing command-line options. Once all options have been read we re-process these values to set the real @@ -247,6 +249,8 @@ static arm_feature_set selected_cpu = ARM_ARCH_NONE; /* Must be long enough to hold any of the names in arm_cpus. */ static char selected_cpu_name[16]; +extern FLONUM_TYPE generic_floating_point_number; + /* Return if no cpu was selected on command-line. */ static bfd_boolean no_cpu_selected (void) @@ -628,6 +632,7 @@ struct asm_opcode #define LITERAL_MASK 0xf000f000 #define OPCODE_MASK 0xfe1fffff #define V4_STR_BIT 0x00000020 +#define VLDR_VMOV_SAME 0x0040f000 #define T2_SUBS_PC_LR 0xf3de8f00 @@ -790,11 +795,21 @@ typedef struct literal_pool struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE]; #endif struct literal_pool * next; + unsigned int alignment; } literal_pool; /* Pointer to a linked list of literal pools. */ literal_pool * list_of_pools = NULL; +typedef enum asmfunc_states +{ + OUTSIDE_ASMFUNC, + WAITING_ASMFUNC_NAME, + WAITING_ENDASMFUNC +} asmfunc_states; + +static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC; + #ifdef OBJ_ELF # define now_it seg_info (now_seg)->tc_segment_info_data.current_it #else @@ -853,7 +868,7 @@ static void it_fsm_post_encode (void); /* This array holds the chars that always start a comment. If the pre-processor is disabled, these aren't very useful. */ -const char comment_chars[] = "@"; +char arm_comment_chars[] = "@"; /* This array holds the chars that only start a comment at the beginning of a line. If the line seems to have the form '# 123 filename' @@ -864,7 +879,7 @@ const char comment_chars[] = "@"; /* Also note that comments like this one will always work. */ const char line_comment_chars[] = "#"; -const char line_separator_chars[] = ";"; +char arm_line_separator_chars[] = ";"; /* Chars that can be used to separate mant from exp in floating point numbers. */ @@ -3012,6 +3027,104 @@ s_even (int ignore ATTRIBUTE_UNUSED) demand_empty_rest_of_line (); } +/* Directives: CodeComposer Studio. */ + +/* .ref (for CodeComposer Studio syntax only). 
*/ +static void +s_ccs_ref (int unused ATTRIBUTE_UNUSED) +{ + if (codecomposer_syntax) + ignore_rest_of_line (); + else + as_bad (_(".ref pseudo-op only available with -mccs flag.")); +} + +/* If name is not NULL, then it is used for marking the beginning of a + function, wherease if it is NULL then it means the function end. */ +static void +asmfunc_debug (const char * name) +{ + static const char * last_name = NULL; + + if (name != NULL) + { + gas_assert (last_name == NULL); + last_name = name; + + if (debug_type == DEBUG_STABS) + stabs_generate_asm_func (name, name); + } + else + { + gas_assert (last_name != NULL); + + if (debug_type == DEBUG_STABS) + stabs_generate_asm_endfunc (last_name, last_name); + + last_name = NULL; + } +} + +static void +s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED) +{ + if (codecomposer_syntax) + { + switch (asmfunc_state) + { + case OUTSIDE_ASMFUNC: + asmfunc_state = WAITING_ASMFUNC_NAME; + break; + + case WAITING_ASMFUNC_NAME: + as_bad (_(".asmfunc repeated.")); + break; + + case WAITING_ENDASMFUNC: + as_bad (_(".asmfunc without function.")); + break; + } + demand_empty_rest_of_line (); + } + else + as_bad (_(".asmfunc pseudo-op only available with -mccs flag.")); +} + +static void +s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED) +{ + if (codecomposer_syntax) + { + switch (asmfunc_state) + { + case OUTSIDE_ASMFUNC: + as_bad (_(".endasmfunc without a .asmfunc.")); + break; + + case WAITING_ASMFUNC_NAME: + as_bad (_(".endasmfunc without function.")); + break; + + case WAITING_ENDASMFUNC: + asmfunc_state = OUTSIDE_ASMFUNC; + asmfunc_debug (NULL); + break; + } + demand_empty_rest_of_line (); + } + else + as_bad (_(".endasmfunc pseudo-op only available with -mccs flag.")); +} + +static void +s_ccs_def (int name) +{ + if (codecomposer_syntax) + s_globl (name); + else + as_bad (_(".def pseudo-op only available with -mccs flag.")); +} + /* Directives: Literal pools. */ static literal_pool * @@ -3050,6 +3163,7 @@ find_or_make_literal_pool (void) pool->sub_section = now_subseg; pool->next = list_of_pools; pool->symbol = NULL; + pool->alignment = 2; /* Add it to the list. */ list_of_pools = pool; @@ -3071,33 +3185,74 @@ find_or_make_literal_pool (void) structure to the relevant literal pool. */ static int -add_to_lit_pool (void) +add_to_lit_pool (unsigned int nbytes) { +#define PADDING_SLOT 0x1 +#define LIT_ENTRY_SIZE_MASK 0xFF literal_pool * pool; - unsigned int entry; + unsigned int entry, pool_size = 0; + bfd_boolean padding_slot_p = FALSE; + unsigned imm1 = 0; + unsigned imm2 = 0; + + if (nbytes == 8) + { + imm1 = inst.operands[1].imm; + imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg + : inst.reloc.exp.X_unsigned ? 0 + : ((bfd_int64_t) inst.operands[1].imm) >> 32); + if (target_big_endian) + { + imm1 = imm2; + imm2 = inst.operands[1].imm; + } + } pool = find_or_make_literal_pool (); /* Check if this literal value is already in the pool. 
*/ for (entry = 0; entry < pool->next_free_entry; entry ++) { - if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) - && (inst.reloc.exp.X_op == O_constant) - && (pool->literals[entry].X_add_number - == inst.reloc.exp.X_add_number) - && (pool->literals[entry].X_unsigned - == inst.reloc.exp.X_unsigned)) + if (nbytes == 4) + { + if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) + && (inst.reloc.exp.X_op == O_constant) + && (pool->literals[entry].X_add_number + == inst.reloc.exp.X_add_number) + && (pool->literals[entry].X_md == nbytes) + && (pool->literals[entry].X_unsigned + == inst.reloc.exp.X_unsigned)) + break; + + if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) + && (inst.reloc.exp.X_op == O_symbol) + && (pool->literals[entry].X_add_number + == inst.reloc.exp.X_add_number) + && (pool->literals[entry].X_add_symbol + == inst.reloc.exp.X_add_symbol) + && (pool->literals[entry].X_op_symbol + == inst.reloc.exp.X_op_symbol) + && (pool->literals[entry].X_md == nbytes)) + break; + } + else if ((nbytes == 8) + && !(pool_size & 0x7) + && ((entry + 1) != pool->next_free_entry) + && (pool->literals[entry].X_op == O_constant) + && (pool->literals[entry].X_add_number == (offsetT) imm1) + && (pool->literals[entry].X_unsigned + == inst.reloc.exp.X_unsigned) + && (pool->literals[entry + 1].X_op == O_constant) + && (pool->literals[entry + 1].X_add_number == (offsetT) imm2) + && (pool->literals[entry + 1].X_unsigned + == inst.reloc.exp.X_unsigned)) break; - if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) - && (inst.reloc.exp.X_op == O_symbol) - && (pool->literals[entry].X_add_number - == inst.reloc.exp.X_add_number) - && (pool->literals[entry].X_add_symbol - == inst.reloc.exp.X_add_symbol) - && (pool->literals[entry].X_op_symbol - == inst.reloc.exp.X_op_symbol)) + padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT); + if (padding_slot_p && (nbytes == 4)) break; + + pool_size += 4; } /* Do we need to create a new entry? */ @@ -3109,7 +3264,64 @@ add_to_lit_pool (void) return FAIL; } - pool->literals[entry] = inst.reloc.exp; + if (nbytes == 8) + { + /* For 8-byte entries, we align to an 8-byte boundary, + and split it into two 4-byte entries, because on 32-bit + host, 8-byte constants are treated as big num, thus + saved in "generic_bignum" which will be overwritten + by later assignments. + + We also need to make sure there is enough space for + the split. + + We also check to make sure the literal operand is a + constant number. 
*/ + if (!(inst.reloc.exp.X_op == O_constant + || inst.reloc.exp.X_op == O_big)) + { + inst.error = _("invalid type for literal pool"); + return FAIL; + } + else if (pool_size & 0x7) + { + if ((entry + 2) >= MAX_LITERAL_POOL_SIZE) + { + inst.error = _("literal pool overflow"); + return FAIL; + } + + pool->literals[entry] = inst.reloc.exp; + pool->literals[entry].X_add_number = 0; + pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4; + pool->next_free_entry += 1; + pool_size += 4; + } + else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE) + { + inst.error = _("literal pool overflow"); + return FAIL; + } + + pool->literals[entry] = inst.reloc.exp; + pool->literals[entry].X_op = O_constant; + pool->literals[entry].X_add_number = imm1; + pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned; + pool->literals[entry++].X_md = 4; + pool->literals[entry] = inst.reloc.exp; + pool->literals[entry].X_op = O_constant; + pool->literals[entry].X_add_number = imm2; + pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned; + pool->literals[entry].X_md = 4; + pool->alignment = 3; + pool->next_free_entry += 1; + } + else + { + pool->literals[entry] = inst.reloc.exp; + pool->literals[entry].X_md = 4; + } + #ifdef OBJ_ELF /* PR ld/12974: Record the location of the first source line to reference this entry in the literal pool. If it turns out during linking that the @@ -3120,14 +3332,45 @@ add_to_lit_pool (void) #endif pool->next_free_entry += 1; } + else if (padding_slot_p) + { + pool->literals[entry] = inst.reloc.exp; + pool->literals[entry].X_md = nbytes; + } inst.reloc.exp.X_op = O_symbol; - inst.reloc.exp.X_add_number = ((int) entry) * 4; + inst.reloc.exp.X_add_number = pool_size; inst.reloc.exp.X_add_symbol = pool->symbol; return SUCCESS; } +bfd_boolean +tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest) +{ + bfd_boolean ret = TRUE; + + if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME) + { + const char *label = rest; + + while (!is_end_of_line[(int) label[-1]]) + --label; + + if (*label == '.') + { + as_bad (_("Invalid label '%s'"), label); + ret = FALSE; + } + + asmfunc_debug (label); + + asmfunc_state = WAITING_ENDASMFUNC; + } + + return ret; +} + /* Can't use symbol_new here, so have to create a symbol and then at a later date assign it a value. Thats what these functions do. */ @@ -3138,7 +3381,7 @@ symbol_locate (symbolS * symbolP, valueT valu, /* Symbol value. */ fragS * frag) /* Associated fragment. */ { - unsigned int name_length; + size_t name_length; char * preserved_copy_of_name; name_length = strlen (name) + 1; /* +1 for \0. */ @@ -3179,7 +3422,6 @@ symbol_locate (symbolS * symbolP, #endif /* DEBUG_SYMS */ } - static void s_ltorg (int ignored ATTRIBUTE_UNUSED) { @@ -3193,15 +3435,17 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED) || pool->next_free_entry == 0) return; - mapping_state (MAP_DATA); - /* Align pool as you have word accesses. Only make a frag if we have to. */ if (!need_pass_2) - frag_align (2, 0, 0); + frag_align (pool->alignment, 0, 0); record_alignment (now_seg, 2); +#ifdef OBJ_ELF + seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA; + make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now); +#endif sprintf (sym_name, "$$lit_\002%x", pool->id); symbol_locate (pool->symbol, sym_name, now_seg, @@ -3221,7 +3465,8 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED) dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry); #endif /* First output the expression in the instruction to the pool. 
*/ - emit_expr (&(pool->literals[entry]), 4); /* .word */ + emit_expr (&(pool->literals[entry]), + pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK); } /* Mark the pool as empty. */ @@ -3316,7 +3561,8 @@ s_arm_elf_cons (int nbytes) memcpy (base, save_buf, p - base); offset = nbytes - size; - p = frag_more ((int) nbytes); + p = frag_more (nbytes); + memset (p, 0, nbytes); fix_new_exp (frag_now, p - frag_now->fr_literal + offset, size, &exp, 0, (enum bfd_reloc_code_real) reloc); } @@ -4134,15 +4380,24 @@ s_arm_unwind_save (int arch_v6) s_arm_unwind_save_fpa (reg->number); return; - case REG_TYPE_RN: s_arm_unwind_save_core (); return; + case REG_TYPE_RN: + s_arm_unwind_save_core (); + return; + case REG_TYPE_VFD: if (arch_v6) s_arm_unwind_save_vfp_armv6 (); else s_arm_unwind_save_vfp (); return; - case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return; - case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return; + + case REG_TYPE_MMXWR: + s_arm_unwind_save_mmxwr (); + return; + + case REG_TYPE_MMXWCG: + s_arm_unwind_save_mmxwcg (); + return; default: as_bad (_(".unwind_save does not support this kind of register")); @@ -4477,6 +4732,13 @@ const pseudo_typeS md_pseudo_table[] = #ifdef TE_PE {"secrel32", pe_directive_secrel, 0}, #endif + + /* These are for compatibility with CodeComposer Studio. */ + {"ref", s_ccs_ref, 0}, + {"def", s_ccs_def, 0}, + {"asmfunc", s_ccs_asmfunc, 0}, + {"endasmfunc", s_ccs_endasmfunc, 0}, + { 0, 0, 0 } }; @@ -4515,28 +4777,31 @@ parse_immediate (char **str, int *val, int min, int max, instructions. Puts the result directly in inst.operands[i]. */ static int -parse_big_immediate (char **str, int i) +parse_big_immediate (char **str, int i, expressionS *in_exp, + bfd_boolean allow_symbol_p) { expressionS exp; + expressionS *exp_p = in_exp ? in_exp : &exp; char *ptr = *str; - my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG); + my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG); - if (exp.X_op == O_constant) + if (exp_p->X_op == O_constant) { - inst.operands[i].imm = exp.X_add_number & 0xffffffff; + inst.operands[i].imm = exp_p->X_add_number & 0xffffffff; /* If we're on a 64-bit host, then a 64-bit number can be returned using O_constant. We have to be careful not to break compilation for 32-bit X_add_number, though. */ - if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0) + if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0) { - /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */ - inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff; + /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */ + inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16) + & 0xffffffff); inst.operands[i].regisimm = 1; } } - else if (exp.X_op == O_big - && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32) + else if (exp_p->X_op == O_big + && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32) { unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0; @@ -4549,7 +4814,7 @@ parse_big_immediate (char **str, int i) PR 11972: Bignums can now be sign-extended to the size of a .octa so check that the out of range bits are all zero or all one. 
*/ - if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64) + if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64) { LITTLENUM_TYPE m = -1; @@ -4557,7 +4822,7 @@ parse_big_immediate (char **str, int i) && generic_bignum[parts * 2] != m) return FAIL; - for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++) + for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++) if (generic_bignum[j] != generic_bignum[j-1]) return FAIL; } @@ -4572,7 +4837,7 @@ parse_big_immediate (char **str, int i) << (LITTLENUM_NUMBER_OF_BITS * j); inst.operands[i].regisimm = 1; } - else + else if (!(exp_p->X_op == O_symbol && allow_symbol_p)) return FAIL; *str = ptr; @@ -4681,6 +4946,31 @@ is_quarter_float (unsigned imm) return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0; } + +/* Detect the presence of a floating point or integer zero constant, + i.e. #0.0 or #0. */ + +static bfd_boolean +parse_ifimm_zero (char **in) +{ + int error_code; + + if (!is_immediate_prefix (**in)) + return FALSE; + + ++*in; + error_code = atof_generic (in, ".", EXP_CHARS, + &generic_floating_point_number); + + if (!error_code + && generic_floating_point_number.sign == '+' + && (generic_floating_point_number.low + > generic_floating_point_number.leader)) + return TRUE; + + return FALSE; +} + /* Parse an 8-bit "quarter-precision" floating point number of the form: 0baBbbbbbc defgh000 00000000 00000000. The zero and minus-zero cases need special handling, since they can't be @@ -5165,10 +5455,12 @@ parse_address_main (char **str, int i, int group_relocations, inst.operands[i].reg = REG_PC; inst.operands[i].isreg = 1; inst.operands[i].preind = 1; - } - /* Otherwise a load-constant pseudo op, no special treatment needed here. */ - if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX)) + if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG)) + return PARSE_OPERAND_FAIL; + } + else if (parse_big_immediate (&p, i, &inst.reloc.exp, + /*allow_symbol_p=*/TRUE)) return PARSE_OPERAND_FAIL; *str = p; @@ -5998,7 +6290,8 @@ parse_neon_mov (char **str, int *which_operand) Case 10: VMOV.F32 , # Case 11: VMOV.F64
, # */ inst.operands[i].immisfloat = 1; - else if (parse_big_immediate (&ptr, i) == SUCCESS) + else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE) + == SUCCESS) /* Case 2: VMOV.
, # Case 3: VMOV.
, # */ ; @@ -6151,6 +6444,7 @@ enum operand_parse_code OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */ OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */ + OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */ OP_RR_RNSC, /* ARM reg or Neon scalar. */ OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */ OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */ @@ -6434,6 +6728,22 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) po_reg_or_goto (REG_TYPE_VFSD, try_imm0); break; + case OP_RSVD_FI0: + { + po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0); + break; + try_ifimm0: + if (parse_ifimm_zero (&str)) + inst.operands[i].imm = 0; + else + { + inst.error + = _("only floating point zero is allowed as immediate value"); + goto failure; + } + } + break; + case OP_RR_RNSC: { po_scalar_or_goto (8, try_rr); @@ -6483,7 +6793,8 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) try_immbig: /* There's a possibility of getting a 64-bit immediate here, so we need special handling. */ - if (parse_big_immediate (&str, i) == FAIL) + if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE) + == FAIL) { inst.error = _("immediate value is out of range"); goto failure; @@ -7224,71 +7535,204 @@ encode_arm_addr_mode_3 (int i, bfd_boolean is_t) } } -/* inst.operands[i] was set up by parse_address. Encode it into an - ARM-format instruction. Reject all forms which cannot be encoded - into a coprocessor load/store instruction. If wb_ok is false, - reject use of writeback; if unind_ok is false, reject use of - unindexed addressing. If reloc_override is not 0, use it instead - of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one - (in which case it is preserved). */ +/* Write immediate bits [7:0] to the following locations: + + |28/24|23 19|18 16|15 4|3 0| + | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| + + This function is used by VMOV/VMVN/VORR/VBIC. */ + +static void +neon_write_immbits (unsigned immbits) +{ + inst.instruction |= immbits & 0xf; + inst.instruction |= ((immbits >> 4) & 0x7) << 16; + inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24); +} + +/* Invert low-order SIZE bits of XHI:XLO. */ + +static void +neon_invert_size (unsigned *xlo, unsigned *xhi, int size) +{ + unsigned immlo = xlo ? *xlo : 0; + unsigned immhi = xhi ? *xhi : 0; + + switch (size) + { + case 8: + immlo = (~immlo) & 0xff; + break; + + case 16: + immlo = (~immlo) & 0xffff; + break; + + case 64: + immhi = (~immhi) & 0xffffffff; + /* fall through. */ + + case 32: + immlo = (~immlo) & 0xffffffff; + break; + + default: + abort (); + } + + if (xlo) + *xlo = immlo; + + if (xhi) + *xhi = immhi; +} + +/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits + A, B, C, D. */ static int -encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override) +neon_bits_same_in_bytes (unsigned imm) { - inst.instruction |= inst.operands[i].reg << 16; + return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff) + && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00) + && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000) + && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000); +} - gas_assert (!(inst.operands[i].preind && inst.operands[i].postind)); +/* For immediate of above form, return 0bABCD. 
*/ - if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */ +static unsigned +neon_squash_bits (unsigned imm) +{ + return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14) + | ((imm & 0x01000000) >> 21); +} + +/* Compress quarter-float representation to 0b...000 abcdefgh. */ + +static unsigned +neon_qfloat_bits (unsigned imm) +{ + return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80); +} + +/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into + the instruction. *OP is passed as the initial value of the op field, and + may be set to a different value depending on the constant (i.e. + "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not + MVN). If the immediate looks like a repeated pattern then also + try smaller element sizes. */ + +static int +neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p, + unsigned *immbits, int *op, int size, + enum neon_el_type type) +{ + /* Only permit float immediates (including 0.0/-0.0) if the operand type is + float. */ + if (type == NT_float && !float_p) + return FAIL; + + if (type == NT_float && is_quarter_float (immlo) && immhi == 0) { - gas_assert (!inst.operands[i].writeback); - if (!unind_ok) + if (size != 32 || *op == 1) + return FAIL; + *immbits = neon_qfloat_bits (immlo); + return 0xf; + } + + if (size == 64) + { + if (neon_bits_same_in_bytes (immhi) + && neon_bits_same_in_bytes (immlo)) { - inst.error = _("instruction does not support unindexed addressing"); - return FAIL; + if (*op == 1) + return FAIL; + *immbits = (neon_squash_bits (immhi) << 4) + | neon_squash_bits (immlo); + *op = 1; + return 0xe; } - inst.instruction |= inst.operands[i].imm; - inst.instruction |= INDEX_UP; - return SUCCESS; - } - if (inst.operands[i].preind) - inst.instruction |= PRE_INDEX; + if (immhi != immlo) + return FAIL; + } - if (inst.operands[i].writeback) + if (size >= 32) { - if (inst.operands[i].reg == REG_PC) + if (immlo == (immlo & 0x000000ff)) { - inst.error = _("pc may not be used with write-back"); - return FAIL; + *immbits = immlo; + return 0x0; } - if (!wb_ok) + else if (immlo == (immlo & 0x0000ff00)) { - inst.error = _("instruction does not support writeback"); - return FAIL; + *immbits = immlo >> 8; + return 0x2; } - inst.instruction |= WRITE_BACK; + else if (immlo == (immlo & 0x00ff0000)) + { + *immbits = immlo >> 16; + return 0x4; + } + else if (immlo == (immlo & 0xff000000)) + { + *immbits = immlo >> 24; + return 0x6; + } + else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff)) + { + *immbits = (immlo >> 8) & 0xff; + return 0xc; + } + else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff)) + { + *immbits = (immlo >> 16) & 0xff; + return 0xd; + } + + if ((immlo & 0xffff) != (immlo >> 16)) + return FAIL; + immlo &= 0xffff; } - if (reloc_override) - inst.reloc.type = (bfd_reloc_code_real_type) reloc_override; - else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC - || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2) - && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0) + if (size >= 16) { - if (thumb_mode) - inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM; - else - inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM; + if (immlo == (immlo & 0x000000ff)) + { + *immbits = immlo; + return 0x8; + } + else if (immlo == (immlo & 0x0000ff00)) + { + *immbits = immlo >> 8; + return 0xa; + } + + if ((immlo & 0xff) != (immlo >> 8)) + return FAIL; + immlo &= 0xff; } - /* Prefer + for zero encoded value. 
*/ - if (!inst.operands[i].negative) - inst.instruction |= INDEX_UP; + if (immlo == (immlo & 0x000000ff)) + { + /* Don't allow MVN with 8-bit immediate. */ + if (*op == 1) + return FAIL; + *immbits = immlo; + return 0xe; + } - return SUCCESS; + return FAIL; } +enum lit_type +{ + CONST_THUMB, + CONST_ARM, + CONST_VEC +}; + /* inst.reloc.exp describes an "=expr" load pseudo-operation. Determine whether it can be performed with a move instruction; if it can, convert inst.instruction to that move instruction and @@ -7299,9 +7743,12 @@ encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override) inst.operands[i] describes the destination register. */ static bfd_boolean -move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3) +move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3) { unsigned long tbit; + bfd_boolean thumb_p = (t == CONST_THUMB); + bfd_boolean arm_p = (t == CONST_ARM); + bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle; if (thumb_p) tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT; @@ -7313,14 +7760,18 @@ move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3) inst.error = _("invalid pseudo operation"); return TRUE; } - if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol) + if (inst.reloc.exp.X_op != O_constant + && inst.reloc.exp.X_op != O_symbol + && inst.reloc.exp.X_op != O_big) { inst.error = _("constant expression expected"); return TRUE; } - if (inst.reloc.exp.X_op == O_constant) + if ((inst.reloc.exp.X_op == O_constant + || inst.reloc.exp.X_op == O_big) + && !inst.operands[i].issingle) { - if (thumb_p) + if (thumb_p && inst.reloc.exp.X_op == O_constant) { if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0) { @@ -7330,7 +7781,7 @@ move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3) return TRUE; } } - else + else if (arm_p && inst.reloc.exp.X_op == O_constant) { int value = encode_arm_immediate (inst.reloc.exp.X_add_number); if (value != FAIL) @@ -7352,23 +7803,130 @@ move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3) return TRUE; } } + else if (vec64_p) + { + int op = 0; + unsigned immbits = 0; + unsigned immlo = inst.operands[1].imm; + unsigned immhi = inst.operands[1].regisimm + ? inst.operands[1].reg + : inst.reloc.exp.X_unsigned + ? 0 + : ((bfd_int64_t)((int) immlo)) >> 32; + int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits, + &op, 64, NT_invtype); + + if (cmode == FAIL) + { + neon_invert_size (&immlo, &immhi, 64); + op = !op; + cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits, + &op, 64, NT_invtype); + } + if (cmode != FAIL) + { + inst.instruction = (inst.instruction & VLDR_VMOV_SAME) + | (1 << 23) + | (cmode << 8) + | (op << 5) + | (1 << 4); + /* Fill other bits in vmov encoding for both thumb and arm. */ + if (thumb_mode) + inst.instruction |= (0x7 << 29) | (0xF << 24); + else + inst.instruction |= (0xF << 28) | (0x1 << 25); + neon_write_immbits (immbits); + return TRUE; + } + } + } + + if (add_to_lit_pool ((!inst.operands[i].isvec + || inst.operands[i].issingle) ? 4 : 8) == FAIL) + return TRUE; + + inst.operands[1].reg = REG_PC; + inst.operands[1].isreg = 1; + inst.operands[1].preind = 1; + inst.reloc.pc_rel = 1; + inst.reloc.type = (thumb_p + ? BFD_RELOC_ARM_THUMB_OFFSET + : (mode_3 + ? BFD_RELOC_ARM_HWLITERAL + : BFD_RELOC_ARM_LITERAL)); + return FALSE; +} + +/* inst.operands[i] was set up by parse_address. Encode it into an + ARM-format instruction. 
Reject all forms which cannot be encoded + into a coprocessor load/store instruction. If wb_ok is false, + reject use of writeback; if unind_ok is false, reject use of + unindexed addressing. If reloc_override is not 0, use it instead + of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one + (in which case it is preserved). */ + +static int +encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override) +{ + if (!inst.operands[i].isreg) + { + gas_assert (inst.operands[0].isvec); + if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE)) + return SUCCESS; + } + + inst.instruction |= inst.operands[i].reg << 16; + + gas_assert (!(inst.operands[i].preind && inst.operands[i].postind)); + + if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */ + { + gas_assert (!inst.operands[i].writeback); + if (!unind_ok) + { + inst.error = _("instruction does not support unindexed addressing"); + return FAIL; + } + inst.instruction |= inst.operands[i].imm; + inst.instruction |= INDEX_UP; + return SUCCESS; + } + + if (inst.operands[i].preind) + inst.instruction |= PRE_INDEX; + + if (inst.operands[i].writeback) + { + if (inst.operands[i].reg == REG_PC) + { + inst.error = _("pc may not be used with write-back"); + return FAIL; + } + if (!wb_ok) + { + inst.error = _("instruction does not support writeback"); + return FAIL; + } + inst.instruction |= WRITE_BACK; } - if (add_to_lit_pool () == FAIL) + if (reloc_override) + inst.reloc.type = (bfd_reloc_code_real_type) reloc_override; + else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC + || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2) + && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0) { - inst.error = _("literal pool insertion failed"); - return TRUE; + if (thumb_mode) + inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM; + else + inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM; } - inst.operands[1].reg = REG_PC; - inst.operands[1].isreg = 1; - inst.operands[1].preind = 1; - inst.reloc.pc_rel = 1; - inst.reloc.type = (thumb_p - ? BFD_RELOC_ARM_THUMB_OFFSET - : (mode_3 - ? BFD_RELOC_ARM_HWLITERAL - : BFD_RELOC_ARM_LITERAL)); - return FALSE; + + /* Prefer + for zero encoded value. */ + if (!inst.operands[i].negative) + inst.instruction |= INDEX_UP; + + return SUCCESS; } /* Functions for instruction encoding, sorted by sub-architecture. 
@@ -8101,7 +8659,7 @@ do_ldst (void) { inst.instruction |= inst.operands[0].reg << 12; if (!inst.operands[1].isreg) - if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE)) + if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE)) return; encode_arm_addr_mode_2 (1, /*is_t=*/FALSE); check_ldr_r15_aligned (); @@ -8134,7 +8692,7 @@ do_ldstv4 (void) constraint (inst.operands[0].reg == REG_PC, BAD_PC); inst.instruction |= inst.operands[0].reg << 12; if (!inst.operands[1].isreg) - if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE)) + if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE)) return; encode_arm_addr_mode_3 (1, /*is_t=*/FALSE); } @@ -10678,7 +11236,7 @@ do_t_ldst (void) { if (opcode <= 0xffff) inst.instruction = THUMB_OP32 (opcode); - if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) + if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE)) return; } if (inst.operands[1].isreg @@ -10784,7 +11342,7 @@ do_t_ldst (void) inst.instruction = THUMB_OP16 (inst.instruction); if (!inst.operands[1].isreg) - if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) + if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE)) return; constraint (!inst.operands[1].preind @@ -11130,16 +11688,9 @@ do_t_mov_cmp (void) results. Don't allow this. */ if (low_regs) { -/* Silence this error for now because clang generates "MOV" two low regs in - unified syntax for thumb1, and expects CPSR are not affected. This check - doesn't exist in binutils-2.21 with gcc 4.6. The thumb1 code generated by - clang will continue to have problem running on v5t but not on v6 and beyond. -*/ -#if 0 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6), "MOV Rd, Rs with two low registers is not " "permitted on this architecture"); -#endif ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_v6); } @@ -13672,197 +14223,6 @@ neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size) return FAIL; } -/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits - A, B, C, D. */ - -static int -neon_bits_same_in_bytes (unsigned imm) -{ - return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff) - && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00) - && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000) - && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000); -} - -/* For immediate of above form, return 0bABCD. */ - -static unsigned -neon_squash_bits (unsigned imm) -{ - return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14) - | ((imm & 0x01000000) >> 21); -} - -/* Compress quarter-float representation to 0b...000 abcdefgh. */ - -static unsigned -neon_qfloat_bits (unsigned imm) -{ - return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80); -} - -/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into - the instruction. *OP is passed as the initial value of the op field, and - may be set to a different value depending on the constant (i.e. - "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not - MVN). If the immediate looks like a repeated pattern then also - try smaller element sizes. */ - -static int -neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p, - unsigned *immbits, int *op, int size, - enum neon_el_type type) -{ - /* Only permit float immediates (including 0.0/-0.0) if the operand type is - float. 
*/ - if (type == NT_float && !float_p) - return FAIL; - - if (type == NT_float && is_quarter_float (immlo) && immhi == 0) - { - if (size != 32 || *op == 1) - return FAIL; - *immbits = neon_qfloat_bits (immlo); - return 0xf; - } - - if (size == 64) - { - if (neon_bits_same_in_bytes (immhi) - && neon_bits_same_in_bytes (immlo)) - { - if (*op == 1) - return FAIL; - *immbits = (neon_squash_bits (immhi) << 4) - | neon_squash_bits (immlo); - *op = 1; - return 0xe; - } - - if (immhi != immlo) - return FAIL; - } - - if (size >= 32) - { - if (immlo == (immlo & 0x000000ff)) - { - *immbits = immlo; - return 0x0; - } - else if (immlo == (immlo & 0x0000ff00)) - { - *immbits = immlo >> 8; - return 0x2; - } - else if (immlo == (immlo & 0x00ff0000)) - { - *immbits = immlo >> 16; - return 0x4; - } - else if (immlo == (immlo & 0xff000000)) - { - *immbits = immlo >> 24; - return 0x6; - } - else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff)) - { - *immbits = (immlo >> 8) & 0xff; - return 0xc; - } - else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff)) - { - *immbits = (immlo >> 16) & 0xff; - return 0xd; - } - - if ((immlo & 0xffff) != (immlo >> 16)) - return FAIL; - immlo &= 0xffff; - } - - if (size >= 16) - { - if (immlo == (immlo & 0x000000ff)) - { - *immbits = immlo; - return 0x8; - } - else if (immlo == (immlo & 0x0000ff00)) - { - *immbits = immlo >> 8; - return 0xa; - } - - if ((immlo & 0xff) != (immlo >> 8)) - return FAIL; - immlo &= 0xff; - } - - if (immlo == (immlo & 0x000000ff)) - { - /* Don't allow MVN with 8-bit immediate. */ - if (*op == 1) - return FAIL; - *immbits = immlo; - return 0xe; - } - - return FAIL; -} - -/* Write immediate bits [7:0] to the following locations: - - |28/24|23 19|18 16|15 4|3 0| - | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| - - This function is used by VMOV/VMVN/VORR/VBIC. */ - -static void -neon_write_immbits (unsigned immbits) -{ - inst.instruction |= immbits & 0xf; - inst.instruction |= ((immbits >> 4) & 0x7) << 16; - inst.instruction |= ((immbits >> 7) & 0x1) << 24; -} - -/* Invert low-order SIZE bits of XHI:XLO. */ - -static void -neon_invert_size (unsigned *xlo, unsigned *xhi, int size) -{ - unsigned immlo = xlo ? *xlo : 0; - unsigned immhi = xhi ? *xhi : 0; - - switch (size) - { - case 8: - immlo = (~immlo) & 0xff; - break; - - case 16: - immlo = (~immlo) & 0xffff; - break; - - case 64: - immhi = (~immhi) & 0xffffffff; - /* fall through. */ - - case 32: - immlo = (~immlo) & 0xffffffff; - break; - - default: - abort (); - } - - if (xlo) - *xlo = immlo; - - if (xhi) - *xhi = immhi; -} - static void do_neon_logic (void) { @@ -14678,7 +15038,7 @@ do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour, { case neon_cvt_flavour_s32_f64: sz = 1; - op = 0; + op = 1; break; case neon_cvt_flavour_s32_f32: sz = 0; @@ -17119,6 +17479,9 @@ static const struct depr_insn_mask depr_it_insns[] = { { 0x4800, 0xf800, N_("Literal loads") }, { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") }, { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") }, + /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue' + field in asm_opcode. 'tvalue' is used at the stage this check happen. 
*/ + { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") }, { 0, 0, NULL } }; @@ -19224,8 +19587,8 @@ static const struct asm_opcode insns[] = nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), - nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), - nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), + nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp), + nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp), NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push), NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop), NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz), @@ -20485,7 +20848,8 @@ arm_handle_align (fragS * fragP) if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED)) { - if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)) + if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0] + ? selected_cpu : arm_arch_none, arm_ext_v6t2)) { narrow_noop = thumb_noop[1][target_big_endian]; noop = wide_thumb_noop[target_big_endian]; @@ -20499,7 +20863,9 @@ arm_handle_align (fragS * fragP) } else { - noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0] + noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0] + ? selected_cpu : arm_arch_none, + arm_ext_v6k) != 0] [target_big_endian]; noop_size = 4; #ifdef OBJ_ELF @@ -20848,7 +21214,7 @@ start_unwind_section (const segT text_seg, int idx) /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional personality routine data. Returns zero, or the index table value for - and inline entry. */ + an inline entry. */ static valueT create_unwind_entry (int have_data) @@ -20919,7 +21285,12 @@ create_unwind_entry (int have_data) } else { - gas_assert (unwind.personality_index == -1); + /* PR 16765: Missing or misplaced unwind directives can trigger this. */ + if (unwind.personality_index != -1) + { + as_bad (_("attempt to recreate an unwind entry")); + return 1; + } /* An extra byte is required for the opcode count. */ size = unwind.opcode_count + 1; @@ -21026,11 +21397,19 @@ int tc_arm_regname_to_dw2regnum (char *regname) { int reg = arm_reg_parse (®name, REG_TYPE_RN); + if (reg != FAIL) + return reg; - if (reg == FAIL) - return -1; + /* PR 16694: Allow VFP registers as well. */ + reg = arm_reg_parse (®name, REG_TYPE_VFS); + if (reg != FAIL) + return 64 + reg; - return reg; + reg = arm_reg_parse (®name, REG_TYPE_VFD); + if (reg != FAIL) + return reg + 256; + + return -1; } #ifdef TE_PE @@ -22225,7 +22604,7 @@ md_apply_fix (fixS * fixP, case BFD_RELOC_8: if (fixP->fx_done || !seg->use_rela_p) - md_number_to_chars (buf, value, 1); + *buf = value; break; case BFD_RELOC_16: @@ -22238,9 +22617,6 @@ md_apply_fix (fixS * fixP, case BFD_RELOC_ARM_THM_TLS_CALL: case BFD_RELOC_ARM_TLS_DESCSEQ: case BFD_RELOC_ARM_THM_TLS_DESCSEQ: - S_SET_THREAD_LOCAL (fixP->fx_addsy); - break; - case BFD_RELOC_ARM_TLS_GOTDESC: case BFD_RELOC_ARM_TLS_GD32: case BFD_RELOC_ARM_TLS_LE32: @@ -22248,12 +22624,10 @@ md_apply_fix (fixS * fixP, case BFD_RELOC_ARM_TLS_LDM32: case BFD_RELOC_ARM_TLS_LDO32: S_SET_THREAD_LOCAL (fixP->fx_addsy); - /* fall through */ + break; case BFD_RELOC_ARM_GOT32: case BFD_RELOC_ARM_GOTOFF: - if (fixP->fx_done || !seg->use_rela_p) - md_number_to_chars (buf, 0, 4); break; case BFD_RELOC_ARM_GOT_PREL: @@ -23002,9 +23376,9 @@ void cons_fix_new_arm (fragS * frag, int where, int size, - expressionS * exp) + expressionS * exp, + bfd_reloc_code_real_type reloc) { - bfd_reloc_code_real_type type; int pcrel = 0; /* Pick a reloc. 
@@ -23012,17 +23386,17 @@ cons_fix_new_arm (fragS * frag, switch (size) { case 1: - type = BFD_RELOC_8; + reloc = BFD_RELOC_8; break; case 2: - type = BFD_RELOC_16; + reloc = BFD_RELOC_16; break; case 4: default: - type = BFD_RELOC_32; + reloc = BFD_RELOC_32; break; case 8: - type = BFD_RELOC_64; + reloc = BFD_RELOC_64; break; } @@ -23030,11 +23404,11 @@ cons_fix_new_arm (fragS * frag, if (exp->X_op == O_secrel) { exp->X_op = O_symbol; - type = BFD_RELOC_32_SECREL; + reloc = BFD_RELOC_32_SECREL; } #endif - fix_new_exp (frag, where, (int) size, exp, pcrel, type); + fix_new_exp (frag, where, size, exp, pcrel, reloc); } #if defined (OBJ_COFF) @@ -24004,8 +24378,7 @@ static const struct arm_cpu_option_table arm_cpus[] = ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL), ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC, FPU_NONE, "Cortex-A5"), - ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT, - FPU_ARCH_NEON_VFP_V4, + ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4, "Cortex-A7"), ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC, ARM_FEATURE (0, FPU_VFP_V3 @@ -24015,12 +24388,12 @@ static const struct arm_cpu_option_table arm_cpus[] = ARM_FEATURE (0, FPU_VFP_V3 | FPU_NEON_EXT_V1), "Cortex-A9"), - ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT, - FPU_ARCH_NEON_VFP_V4, + ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4, "Cortex-A12"), - ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT, - FPU_ARCH_NEON_VFP_V4, + ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4, "Cortex-A15"), + ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4, + "Cortex-A17"), ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, "Cortex-A53"), ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, @@ -24100,6 +24473,7 @@ static const struct arm_arch_option_table arm_archs[] = /* The official spelling of the ARMv7 profile variants is the dashed form. Accept the non-dashed form for compatibility with old toolchains. */ ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP), + ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP), ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP), ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP), ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP), @@ -24515,6 +24889,15 @@ arm_parse_it_mode (char * str) return ret; } +static bfd_boolean +arm_ccs_mode (char * unused ATTRIBUTE_UNUSED) +{ + codecomposer_syntax = TRUE; + arm_comment_chars[0] = ';'; + arm_line_separator_chars[0] = 0; + return TRUE; +} + struct arm_long_option_table arm_long_opts[] = { {"mcpu=", N_("\t assemble for CPU "), @@ -24531,6 +24914,8 @@ struct arm_long_option_table arm_long_opts[] = #endif {"mimplicit-it=", N_("\t controls implicit insertion of IT instructions"), arm_parse_it_mode, NULL}, + {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"), + arm_ccs_mode, NULL}, {NULL, NULL, 0, NULL} }; @@ -24679,7 +25064,7 @@ static const cpu_arch_ver_table cpu_arch_ver[] = {11, ARM_ARCH_V6M}, {12, ARM_ARCH_V6SM}, {8, ARM_ARCH_V6T2}, - {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT}, + {10, ARM_ARCH_V7VE}, {10, ARM_ARCH_V7R}, {10, ARM_ARCH_V7M}, {14, ARM_ARCH_V8A}, @@ -24729,6 +25114,8 @@ aeabi_set_public_attributes (void) if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any)) ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t); + selected_cpu = flags; + /* Allow the user to override the reported architecture. */ if (object_arch) { -- cgit v1.2.3