Changeset 1973 for binutils/trunk/gas/config/tc-arm.c
- Timestamp: Feb 6, 2017, 1:00:00 PM
- Location: binutils/trunk
- Files: 2 edited
binutils/trunk
- Property svn:mergeinfo set to /binutils/vendor/current
Modified: binutils/trunk/gas/config/tc-arm.c (diff from r970 to r1973)
The changes to tc-arm.c, by area:

Copyright and feature-set declarations:
- The copyright years are updated from 1994-2014 to 1994-2016.
- The arm_feature_set initializers are converted to the new macros: ARM_FEATURE (x, 0) becomes ARM_FEATURE_CORE_LOW (x), ARM_FEATURE (0, x) becomes ARM_FEATURE_COPROC (x), and arm_arch_full now uses the three-argument ARM_FEATURE (-1, -1, -1). Core features are now held in two words (low and high) plus a coprocessor word (sketched after this list).
- New feature sets are added for newer extensions: arm_ext_pan (ARM_EXT2_PAN), arm_ext_v8m, arm_ext_v8m_main, arm_ext_v8m_m_only, arm_ext_v6t2_v8m, arm_ext_atomics, arm_ext_ras, arm_ext_fp16 (ARM_EXT2_FP16_INST), arm_ext_dsp (the DSP instructions Tag_DSP_extension refers to), fpu_vfp_ext_armv8xd and fpu_neon_ext_v8_1 (FPU_NEON_EXT_RDMA); arm_ext_m now also includes ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN in the high word.
- Feature sets only referenced on ELF targets (fpu_arch_maverick, arm_ext_v7m, arm_ext_dsp, arm_arch_v6m_only, fpu_vfp_fp16, fpu_neon_ext_fma) are wrapped in #ifdef OBJ_ELF, and otherwise unused ones (fpu_arch_vfp_v1, fpu_arch_vfp_v3, fpu_arch_neon_v1, arm_arch_full) are marked ATTRIBUTE_UNUSED.
- selected_cpu_name grows from 16 to 20 bytes so it can hold any of the names in arm_cpus, and no_cpu_selected now uses ARM_FEATURE_EQUAL instead of comparing the .core and .coproc fields directly.
- struct reloc_entry's name field becomes const char *, and two new diagnostics are defined: BAD_FP16 ("selected processor does not support fp16 instruction") and THUMB1_RELOC_ONLY ("relocation valid in thumb1 code only").
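The CORE_LOW/CORE_HIGH split exists because the original single word of ARM_EXT_* bits is full; ARMv8-M, PAN, RAS and FP16 live in a second word of ARM_EXT2_* bits. The real macros are defined in include/opcode/arm.h, which is not part of this diff, so the sketch below only illustrates the idea with invented type and bit names.

    #include <stdio.h>

    /* Illustrative only: a feature set with two core words plus a
       coprocessor word, mirroring the CORE_LOW / CORE_HIGH / COPROC split.  */
    typedef struct { unsigned long core[2]; unsigned long coproc; } feature_set;

    #define FEAT_CORE_LOW(x)  ((feature_set) { { (x), 0 }, 0 })
    #define FEAT_CORE_HIGH(x) ((feature_set) { { 0, (x) }, 0 })
    #define FEAT_COPROC(x)    ((feature_set) { { 0, 0 }, (x) })

    /* Treat the feature as present when any requested bit is set.  */
    static int
    has_feature (const feature_set *cpu, const feature_set *want)
    {
      return (cpu->core[0] & want->core[0]) != 0
             || (cpu->core[1] & want->core[1]) != 0
             || (cpu->coproc & want->coproc) != 0;
    }

    #define EXT_V8    0x00000001ul   /* invented bit assignments */
    #define EXT2_FP16 0x00000002ul

    int
    main (void)
    {
      feature_set cpu = { { EXT_V8, EXT2_FP16 }, 0 };
      feature_set fp16 = FEAT_CORE_HIGH (EXT2_FP16);
      printf ("fp16 available: %d\n", has_feature (&cpu, &fp16));
      return 0;
    }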
Memory management, directive parsing and relocation additions:
- md_atof now returns const char *, and parse_neon_el_struct_list initializes its local firsttype before use.
- Casted xmalloc calls become XNEW / XNEWVEC, and the alloca buffers used by the .req/.dn/.qn alias handling and by the relocation-suffix rewriting are replaced by xmemdup0 / XNEWVEC plus an explicit free on every exit path, including the early return when insert_reg_alias fails (a heap-duplication helper of this kind is sketched below).
- The mapping-symbol state machine is reorganised: the TRANSITION macro is defined once in front of mapping_state, and the MAP_UNDEFINED to MAP_ARM/MAP_THUMB case (emitting an initial data mapping symbol when the first frag already has contents) moves from mapping_state into mapping_state_2.
- ACONCAT becomes concat plus free in the real-start symbol lookup, and get_symbol_end is replaced by the get_symbol_name / restore_line_pointer interface in several directive handlers, including .syntax and the unwinder's .personality (the latter also skips an opening quote).
- The file-local s_align implementation (which treated ".align 0" as ".align 2") is removed; the "align" entry in md_pseudo_table now calls the generic s_align_ptwo with a default of 2.
- New literal pools are allocated with XNEW, padding slots added to a pool are explicitly marked O_constant, and tc_start_label_without_colon no longer takes arguments, reading input_line_pointer directly.
- The FPA floating-point constant handling defines and uses X_PRECISION / E_PRECISION instead of the bare literals 5 and 15, and the floating-point immediate parser accepts #0x0 as a synonym for #0.
- Four group relocations are added for building absolute addresses with Thumb-1 ALU instructions: lower0_7, lower8_15, upper0_7 and upper8_15, mapping to BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC through ..._G3_NC (ALU forms only).
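Several of these changes replace alloca with heap allocation plus an explicit free. The pattern (copy N bytes and NUL-terminate, then free on every exit path) can be sketched in plain C as follows; this uses a local helper named dup0, not the actual xmemdup0 used by the changeset.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy LEN bytes of SRC into a fresh NUL-terminated heap buffer.
       Stand-in for the xmemdup0-style helper used in the changeset.  */
    static char *
    dup0 (const char *src, size_t len)
    {
      char *p = malloc (len + 1);
      if (p == NULL)
        abort ();
      memcpy (p, src, len);
      p[len] = '\0';
      return p;
    }

    int
    main (void)
    {
      const char *input = "r11 .req fp   @ rest of line";
      char *name = dup0 (input, 3);   /* just "r11" */
      printf ("%s\n", name);
      free (name);                    /* explicit free on every exit path */
      return 0;
    }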
Feature bookkeeping, diagnostics and new helpers:
- The check that decides whether to apply M-profile rules when -march=all is in use now compares with ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any) instead of looking at .core directly (the same conversion is made in the Thumb MRS/MSR handling further down).
- A new record_feature_use helper merges a feature into arm_arch_used or thumb_arch_used, and mark_feature_used is rewritten on top of it.
- In operand parsing, the '^' writeback flag was always being set on operands[1]; it is now set on the operand actually being parsed (operands[i]).
- Deprecation diagnostics are downgraded from as_warn to as_tsktsk throughout the file (use of r13, use of PC in certain instructions, SWP/SWPB on ARMv6 and later, SETEND on ARMv8, the deprecated CP15/TEE register accesses, and SP/PC source registers in Thumb).
- The rotate_left macro masks its shift counts, (v << (n & 31) | v >> ((32 - n) & 31)), so rotations by 0 or 32 are no longer undefined behaviour.
- A new do_scalar_fp16_v82_encode helper supports ARMv8.2 scalar FP16 instructions: it requires the FP16 extension (BAD_FP16 otherwise), warns that a conditional scalar FP16 instruction is UNPREDICTABLE, records the feature use, and rewrites the coprocessor field of the single-precision encoding from 10 to 9 via (insn & 0xfffff0ff) | 0x900.
- encode_arm_immediate returns immediately for values that already fit in 8 bits and starts its rotation search at 2 (sketched below).
- Two 64-bit-host-only helpers, is_double_a_single and double_to_single, test whether a double-precision bit pattern can be narrowed to single precision without loss and perform the narrowing.
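The immediate test follows the classic A32 rule: a data-processing immediate is an 8-bit value rotated right by an even amount. A self-contained sketch of the check, mirroring the logic visible in the hunk (including the masked rotate and the new fast path):

    #include <stdio.h>

    /* Rotate left with the shift count masked, as in the fixed rotate_left
       macro, so n == 0 and n == 32 are well defined.  */
    static unsigned int
    rol32 (unsigned int v, unsigned int n)
    {
      return (v << (n & 31)) | (v >> ((32 - n) & 31));
    }

    /* Return the 12-bit encoding (rotate field plus 8-bit payload) if VAL is
       representable as an A32 data-processing immediate, or -1 if not.  */
    static int
    arm_immediate (unsigned int val)
    {
      unsigned int i, a;

      if (val <= 0xff)
        return val;                   /* fast path added in this changeset */

      for (i = 2; i < 32; i += 2)
        if ((a = rol32 (val, i)) <= 0xff)
          return a | (i << 7);        /* rotate-right amount i/2 in bits 8-11 */

      return -1;
    }

    int
    main (void)
    {
      printf ("%d\n", arm_immediate (0xff000000u)); /* encodable   */
      printf ("%d\n", arm_immediate (0x00ff00ffu)); /* not encodable */
      return 0;
    }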
Literal loads (ldr/vldr Rx, =constant):
- move_or_literal_pool is largely rewritten. Constant and O_big expressions are first collapsed into a single value, a 64-bit one when the host provides such a type, assembled from the generic_bignum littlenums (or from gen_to_words when the bignum came from a floating-point constant).
- For Thumb, the constant is tried as a 16-bit MOV into a low register, then, with Thumb-2 (or the ARMv8-M subset that provides MOVW) available, as a 32-bit MOV.W, as an MVN of the inverted value via encode_thumb32_immediate, or as a MOVW of a 16-bit value, before falling back to the literal pool. The existing ARM MOV/MVN immediate attempts are kept.
- For vector loads, a VMOV immediate is tried on Neon-capable CPUs, FCONSTS is used when a single-precision value passes is_quarter_float (the 8-bit VFP immediate form), and, on 64-bit hosts only, FCONSTD is used when the double-precision value can be narrowed losslessly (is_double_a_single / double_to_single, sketched below); a comment notes that 32-bit-only hosts will therefore produce slightly different output.
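The narrowing test used for the FCONSTD optimisation checks that the exponent fits the single-precision range (or is the all-zeros or all-ones pattern) and that the 29 mantissa bits that would be dropped are zero. Here are the two helpers from the hunk restated as a standalone program:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Nonzero if the IEEE double bit pattern V can be narrowed to single
       precision without losing accuracy (same test as in the hunk).  */
    static int
    is_double_a_single (uint64_t v)
    {
      int exp = (int) ((v >> 52) & 0x7ff);
      uint64_t mantissa = v & 0xfffffffffffffULL;

      return (exp == 0 || exp == 0x7ff
              || (exp >= 1023 - 126 && exp <= 1023 + 127))
             && (mantissa & 0x1fffffff) == 0;
    }

    /* Narrow the double bit pattern V to a single-precision bit pattern.  */
    static uint32_t
    double_to_single (uint64_t v)
    {
      int sign = (int) ((v >> 63) & 1);
      int exp = (int) ((v >> 52) & 0x7ff);
      uint64_t mantissa = v & 0xfffffffffffffULL;

      if (exp == 0x7ff)
        exp = 0xff;                 /* Inf/NaN keep the all-ones exponent.  */
      else
        {
          exp = exp - 1023 + 127;
          if (exp >= 0xff)
            {
              /* Overflow; the hunk clamps the exponent here.  In the
                 assembler this branch is unreachable because
                 is_double_a_single rejects such values first.  */
              exp = 0x7f;
              mantissa = 0;
            }
          else if (exp < 0)
            {
              /* No denormalized numbers.  */
              exp = 0;
              mantissa = 0;
            }
        }
      mantissa >>= 29;
      return ((uint32_t) sign << 31) | ((uint32_t) exp << 23) | (uint32_t) mantissa;
    }

    int
    main (void)
    {
      double d = 1.5;
      uint64_t bits;
      memcpy (&bits, &d, sizeof bits);
      printf ("%d 0x%08x\n", is_double_a_single (bits),
              (unsigned int) double_to_single (bits));
      return 0;
    }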
Encoding fixes and new instructions (ARM and Thumb):
- PR 18256: when the operand of a coprocessor load is neither a register nor a vector register, the assembler now reports "invalid co-processor operand" instead of hitting an assertion.
- New small encoders do_rn and do_tt are added, and the deprecated CP15 barrier / TEE register table entries use ARM_FEATURE_CORE_LOW (ARM_EXT_V8) and ARM_ARCH_NONE.
- do_arit, do_mov and the Thumb add/sub and mov immediate paths reject the new Thumb-1 ALU ABS relocations where they are not valid (THUMB1_RELOC_ONLY), and the 16-bit Thumb paths only select BFD_RELOC_ARM_THUMB_ADD / BFD_RELOC_ARM_THUMB_IMM or set up relaxation when no such relocation was requested; the byte selection implied by the relocation names is sketched below.
- do_push_pop rejects writeback ("push/pop do not support {reglist}^").
- New do_setpan and do_t_setpan encoders implement SETPAN, guarded by the new arm_ext_pan feature set.
- Two coprocessor encodings use 0xfU << 28 instead of 0xf << 28 to avoid shifting into the sign bit.
- do_t_branch keeps its relocation in a bfd_reloc_code_real_type and rejects wide conditional branches on cores without Thumb-2 ("selected architecture does not support wide conditional branch instruction").
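The lower0_7 / lower8_15 / upper0_7 / upper8_15 relocations let Thumb-1 code build a 32-bit absolute address one byte at a time with ALU immediates. Judging by the names, each group selects one byte of the symbol value; the authoritative semantics are the BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC through G3_NC howtos, which are not part of this file. A small sketch of that byte selection:

    #include <stdio.h>
    #include <stdint.h>

    /* Select the byte of ADDR that each of the four group relocations
       would place in the 8-bit immediate of a Thumb-1 ALU instruction.
       Illustrative only; the BFD howtos define the real behaviour.  */
    static uint8_t
    alu_abs_group (uint32_t addr, int group)      /* group 0..3 */
    {
      return (uint8_t) (addr >> (8 * group));
    }

    int
    main (void)
    {
      uint32_t addr = 0x20014f3cu;
      printf ("lower0_7  = 0x%02x\n", (unsigned) alu_abs_group (addr, 0));
      printf ("lower8_15 = 0x%02x\n", (unsigned) alu_abs_group (addr, 1));
      printf ("upper0_7  = 0x%02x\n", (unsigned) alu_abs_group (addr, 2));
      printf ("upper8_15 = 0x%02x\n", (unsigned) alu_abs_group (addr, 3));
      return 0;
    }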
Half-precision support in the Neon/VFP type system:
- The Thumb MRS/MSR special-register checks determine M profile with !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any), the single-register push/pop check uses 1U <<, and the Neon opcode table gains VQRDMLAH and VQRDMLSH.
- The operand "shape" machinery learns half precision: new H-based shapes (NS_HH, NS_HHH, NS_HR, NS_RH, NS_HI, NS_HF/NS_FH, NS_HFI/NS_FHI, NS_HD/NS_DH), an SC_HALF class, an SE_H element of size 16, and new type masks N_S_32, N_F_16_32 and N_F_ALL; N_SUF_32 and N_IF_32 now include F16.
- neon_select_shape maps a .f16/.16 type suffix on an S register onto the H shapes, whether the suffix follows the mnemonic (inst.vectype) or a register operand (inst.operands[].vectype); the new comment gives examples such as vadd.f16 s0, s1, s2 selecting NS_HHH and vcvt.f16.s32 s2, s2, #29 selecting NS_HFI. The SE_F case is tightened so it no longer swallows 16-bit types. A toy version of this mapping is sketched below.
- neon_check_type treats any of N_F16/N_F32/N_F64 as float, rejects F16 when the FP16 extension is absent (BAD_FP16), and accepts a 16-bit element living in a 32-bit S register when FP16 is available.
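As a toy illustration of the idea that one type suffix selects the register "shape" for every scalar operand, with 16-bit elements still held in 32-bit S registers: the names and the simplification below are invented for the example, while the real logic in neon_select_shape / neon_check_type also consults per-operand annotations and the D/Q register classes.

    #include <stdio.h>

    /* Map an operand count and element size to a scalar shape string,
       e.g. "HHH" for a three-operand .f16 instruction.  */
    static const char *
    scalar_shape (int operands, int elt_size)
    {
      static char buf[4];
      char c = elt_size == 16 ? 'H' : elt_size == 32 ? 'F' : 'D';
      int i;

      for (i = 0; i < operands && i < 3; i++)
        buf[i] = c;
      buf[i] = '\0';
      return buf;
    }

    int
    main (void)
    {
      printf ("vadd.f16 s0, s1, s2 -> NS_%s\n", scalar_shape (3, 16));
      printf ("vsqrt.f32 s0, s1    -> NS_%s\n", scalar_shape (2, 32));
      return 0;
    }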
Scalar FP16 forms of the VFP-style mnemonics:
- The "nsyn" encoders (vadd/vsub, vmla/vmls, vfma/vfms, vmul, vabs/vneg, vsqrt, vdiv, vnmul, vcmp/vcmpe) accept the NS_HH/NS_HHH/NS_HI shapes, check their operands against N_F_ALL or N_F_16_32, and call do_scalar_fp16_v82_encode for the half-precision case; the coprocessor rewrite that helper performs is sketched below.
- The immediate forms of the Neon shift instructions gain a range check, "immediate out of range for shift", when the shift count is negative or not smaller than the element size.
- The three-register floating-point Neon encodings pass a 16-bit element size through to neon_three_same so the F16 variant is selected, and do_neon_cmp, do_neon_cmp_inv, do_neon_mac_maybe_scalar, do_neon_mul and the absolute/negate handling use the new N_S_32 / N_F_16_32 masks so .f16 is accepted.
- neon_exchange_operands no longer uses alloca; it allocates its scratch operand with xmalloc and frees it.
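do_scalar_fp16_v82_encode relies on the fact that the ARMv8.2 scalar FP16 encodings differ from the equivalent single-precision ones only in the coprocessor field (9 instead of 10), so an existing single-precision encoder can be reused and patched afterwards. The bit manipulation from the hunk, shown standalone:

    #include <stdio.h>
    #include <stdint.h>

    /* Rewrite the coprocessor field (bits 8-11) of a VFP single-precision
       encoding from 10 to 9, producing the ARMv8.2 FP16 form.  Same
       mask/or as do_scalar_fp16_v82_encode in the changeset.  */
    static uint32_t
    to_fp16_coproc (uint32_t insn)
    {
      return (insn & 0xfffff0ffu) | 0x900u;
    }

    int
    main (void)
    {
      /* vadd.f32 s0, s0, s0 is 0xee300a00 (coprocessor 10 in bits 8-11).  */
      uint32_t vadd_f32 = 0xee300a00u;
      printf ("0x%08x\n", (unsigned int) to_fp16_coproc (vadd_f32));
      return 0;
    }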
*/ 15106 if (!mark_feature_used (&fpu_neon_ext_armv8)) 15107 inst.error = 15108 _("instruction form not available on this architecture."); 15109 else if (!mark_feature_used (&fpu_neon_ext_v8_1)) 15110 { 15111 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD.")); 15112 record_feature_use (&fpu_neon_ext_v8_1); 15113 } 15114 15115 if (inst.operands[2].isscalar) 15116 { 15117 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL); 15118 struct neon_type_el et = neon_check_type (3, rs, 15119 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 15120 NEON_ENCODE (SCALAR, inst); 15121 neon_mul_mac (et, neon_quad (rs)); 15122 } 15123 else 15124 { 15125 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 15126 struct neon_type_el et = neon_check_type (3, rs, 15127 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 15128 NEON_ENCODE (INTEGER, inst); 15129 /* The U bit (rounding) comes from bit mask. */ 15130 neon_three_same (neon_quad (rs), 0, et.size); 15131 } 15132 } 15133 15134 static void 14658 15135 do_neon_fcmp_absolute (void) 14659 15136 { 14660 15137 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 14661 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 15138 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK, 15139 N_F_16_32 | N_KEY); 14662 15140 /* Size field comes from bit mask. */ 14663 neon_three_same (neon_quad (rs), 1, -1);15141 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1); 14664 15142 } 14665 15143 … … 14675 15153 { 14676 15154 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 14677 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 14678 neon_three_same (neon_quad (rs), 0, -1); 15155 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK, 15156 N_F_16_32 | N_KEY); 15157 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1); 14679 15158 } 14680 15159 … … 14692 15171 14693 15172 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 14694 et = neon_check_type (2, rs, N_EQK, N_S 8 | N_S16 | N_S32 | N_F32 | N_KEY);15173 et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY); 14695 15174 14696 15175 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; … … 14901 15380 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \ 14902 15381 /* Half-precision conversions. */ \ 15382 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \ 15383 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \ 15384 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \ 15385 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \ 14903 15386 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \ 14904 15387 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \ 15388 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \ 15389 Compared with single/double precision variants, only the co-processor \ 15390 field is different, so the encoding flow is reused here. */ \ 15391 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \ 15392 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \ 15393 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\ 15394 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\ 14905 15395 /* VFP instructions. 
*/ \ 14906 15396 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \ … … 14977 15467 const char *opname = 0; 14978 15468 14979 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI) 15469 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI 15470 || rs == NS_FHI || rs == NS_HFI) 14980 15471 { 14981 15472 /* Conversions with immediate bitshift. */ … … 15014 15505 if (opname) 15015 15506 do_vfp_nsyn_opcode (opname); 15507 15508 /* ARMv8.2 fp16 VCVT instruction. */ 15509 if (flavour == neon_cvt_flavour_s32_f16 15510 || flavour == neon_cvt_flavour_u32_f16 15511 || flavour == neon_cvt_flavour_f16_u32 15512 || flavour == neon_cvt_flavour_f16_s32) 15513 do_scalar_fp16_v82_encode (); 15016 15514 } 15017 15515 … … 15019 15517 do_vfp_nsyn_cvtz (void) 15020 15518 { 15021 enum neon_shape rs = neon_select_shape (NS_F F, NS_FD, NS_NULL);15519 enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL); 15022 15520 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs); 15023 15521 const char *enc[] = … … 15040 15538 int rm; 15041 15539 15540 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with 15541 D register operands. */ 15542 if (flavour == neon_cvt_flavour_s32_f64 15543 || flavour == neon_cvt_flavour_u32_f64) 15544 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 15545 _(BAD_FPU)); 15546 15547 if (flavour == neon_cvt_flavour_s32_f16 15548 || flavour == neon_cvt_flavour_u32_f16) 15549 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16), 15550 _(BAD_FP16)); 15551 15042 15552 set_it_insn_type (OUTSIDE_IT_INSN); 15043 15553 … … 15049 15559 break; 15050 15560 case neon_cvt_flavour_s32_f32: 15561 sz = 0; 15562 op = 1; 15563 break; 15564 case neon_cvt_flavour_s32_f16: 15051 15565 sz = 0; 15052 15566 op = 1; … … 15060 15574 op = 0; 15061 15575 break; 15576 case neon_cvt_flavour_u32_f16: 15577 sz = 0; 15578 op = 0; 15579 break; 15062 15580 default: 15063 15581 first_error (_("invalid instruction shape")); … … 15078 15596 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm); 15079 15597 inst.instruction |= sz << 8; 15598 15599 /* ARMv8.2 fp16 VCVT instruction. */ 15600 if (flavour == neon_cvt_flavour_s32_f16 15601 ||flavour == neon_cvt_flavour_u32_f16) 15602 do_scalar_fp16_v82_encode (); 15080 15603 inst.instruction |= op << 7; 15081 15604 inst.instruction |= rm << 16; … … 15088 15611 { 15089 15612 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ, 15090 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL); 15613 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, 15614 NS_FH, NS_HF, NS_FHI, NS_HFI, 15615 NS_NULL); 15091 15616 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs); 15617 15618 if (flavour == neon_cvt_flavour_invalid) 15619 return; 15092 15620 15093 15621 /* PR11109: Handle round-to-zero for VCVT conversions. */ 15094 15622 if (mode == neon_cvt_mode_z 15095 15623 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2) 15096 && (flavour == neon_cvt_flavour_s32_f32 15624 && (flavour == neon_cvt_flavour_s16_f16 15625 || flavour == neon_cvt_flavour_u16_f16 15626 || flavour == neon_cvt_flavour_s32_f32 15097 15627 || flavour == neon_cvt_flavour_u32_f32 15098 15628 || flavour == neon_cvt_flavour_s32_f64 … … 15104 15634 } 15105 15635 15636 /* ARMv8.2 fp16 VCVT conversions. 
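CVT_VAR is an X-macro: a single list of conversion rows that is expanded more than once, so the flavour enumeration, the type-checking data and the VFP opcode strings cannot drift apart when rows such as the new fp16 conversions are added. A compressed sketch of the technique under invented names (CONV_TABLE and conv_flavour are not the real CVT_VAR columns; only two opcode strings are borrowed for the demonstration):

  #include <stdio.h>

  /* One row per conversion: flavour name and the opcode mnemonic it maps to
     (NULL where no single-instruction form exists).  */
  #define CONV_TABLE(ROW)                 \
    ROW (f32_s32, "fsitos")               \
    ROW (f32_u32, "fuitos")               \
    ROW (f16_f32, NULL)                   \
    ROW (f32_f16, NULL)

  /* First expansion: the flavour enumeration.  */
  #define MAKE_ENUM(name, opc) conv_##name,
  enum conv_flavour { CONV_TABLE (MAKE_ENUM) conv_invalid };
  #undef MAKE_ENUM

  /* Second expansion: a parallel table of opcode strings.  */
  #define MAKE_OPC(name, opc) opc,
  static const char *const conv_opcodes[] = { CONV_TABLE (MAKE_OPC) };
  #undef MAKE_OPC

  int main (void)
  {
    enum conv_flavour f = conv_f32_u32;
    printf ("flavour %d -> %s\n", (int) f,
            conv_opcodes[f] ? conv_opcodes[f] : "(no direct opcode)");
    return 0;
  }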
*/ 15637 if (mode == neon_cvt_mode_z 15638 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16) 15639 && (flavour == neon_cvt_flavour_s32_f16 15640 || flavour == neon_cvt_flavour_u32_f16) 15641 && (rs == NS_FH)) 15642 { 15643 do_vfp_nsyn_cvtz (); 15644 do_scalar_fp16_v82_encode (); 15645 return; 15646 } 15647 15106 15648 /* VFP rather than Neon conversions. */ 15107 15649 if (flavour >= neon_cvt_flavour_first_fp) … … 15121 15663 { 15122 15664 unsigned immbits; 15123 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 }; 15665 unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000, 15666 0x0000100, 0x1000100, 0x0, 0x1000000}; 15124 15667 15125 15668 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) … … 15130 15673 if (inst.operands[2].present && inst.operands[2].imm == 0) 15131 15674 goto int_encode; 15132 immbits = 32 - inst.operands[2].imm;15133 15675 NEON_ENCODE (IMMED, inst); 15134 15676 if (flavour != neon_cvt_flavour_invalid) … … 15140 15682 inst.instruction |= neon_quad (rs) << 6; 15141 15683 inst.instruction |= 1 << 21; 15142 inst.instruction |= immbits << 16; 15684 if (flavour < neon_cvt_flavour_s16_f16) 15685 { 15686 inst.instruction |= 1 << 21; 15687 immbits = 32 - inst.operands[2].imm; 15688 inst.instruction |= immbits << 16; 15689 } 15690 else 15691 { 15692 inst.instruction |= 3 << 20; 15693 immbits = 16 - inst.operands[2].imm; 15694 inst.instruction |= immbits << 16; 15695 inst.instruction &= ~(1 << 9); 15696 } 15143 15697 15144 15698 neon_dp_fixup (&inst); … … 15161 15715 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 15162 15716 inst.instruction |= neon_quad (rs) << 6; 15163 inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7; 15717 inst.instruction |= (flavour == neon_cvt_flavour_u16_f16 15718 || flavour == neon_cvt_flavour_u32_f32) << 7; 15164 15719 inst.instruction |= mode << 8; 15720 if (flavour == neon_cvt_flavour_u16_f16 15721 || flavour == neon_cvt_flavour_s16_f16) 15722 /* Mask off the original size bits and reencode them. */ 15723 inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18)); 15724 15165 15725 if (thumb_mode) 15166 15726 inst.instruction |= 0xfc000000; … … 15172 15732 int_encode: 15173 15733 { 15174 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 }; 15734 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080, 15735 0x100, 0x180, 0x0, 0x080}; 15175 15736 15176 15737 NEON_ENCODE (INTEGER, inst); … … 15187 15748 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 15188 15749 inst.instruction |= neon_quad (rs) << 6; 15189 inst.instruction |= 2 << 18; 15750 if (flavour >= neon_cvt_flavour_s16_f16 15751 && flavour <= neon_cvt_flavour_f16_u16) 15752 /* Half precision. */ 15753 inst.instruction |= 1 << 18; 15754 else 15755 inst.instruction |= 2 << 18; 15190 15756 15191 15757 neon_dp_fixup (&inst); … … 15288 15854 do_neon_cvttb_1 (bfd_boolean t) 15289 15855 { 15290 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL); 15856 enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD, 15857 NS_DF, NS_DH, NS_NULL); 15291 15858 15292 15859 if (rs == NS_NULL) … … 15304 15871 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype) 15305 15872 { 15873 /* The VCVTB and VCVTT instructions with D-register operands 15874 don't work for SP only targets. 
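For the fixed-point VCVT forms the instruction does not carry the fraction-bit count directly; as the immbits arithmetic in this hunk shows, it stores the distance from the element width, 32 - #fbits for the 32-bit flavours and 16 - #fbits for the new half-precision ones. A small sketch of just that calculation (field placement is omitted, and the real handler sends a #0 count to the integer encoding rather than rejecting it):

  #include <stdio.h>

  /* VCVT fixed-point forms encode "element width minus fraction bits":
     a .f32 conversion with #16 stores 32 - 16 = 16, a .f16 conversion
     with #7 stores 16 - 7 = 9.  A zero or oversized count is reported
     here as -1.  */
  static int cvt_fixed_immbits (unsigned element_size, unsigned frac_bits)
  {
    if (frac_bits == 0 || frac_bits > element_size)
      return -1;
    return (int) (element_size - frac_bits);
  }

  int main (void)
  {
    printf ("%d\n", cvt_fixed_immbits (32, 16));  /* 16 */
    printf ("%d\n", cvt_fixed_immbits (16, 7));   /* 9  */
    printf ("%d\n", cvt_fixed_immbits (16, 17));  /* -1 */
    return 0;
  }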
*/ 15875 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 15876 _(BAD_FPU)); 15877 15306 15878 inst.error = NULL; 15307 15879 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE); … … 15309 15881 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype) 15310 15882 { 15883 /* The VCVTB and VCVTT instructions with D-register operands 15884 don't work for SP only targets. */ 15885 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 15886 _(BAD_FPU)); 15887 15311 15888 inst.error = NULL; 15312 15889 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE); … … 15658 16235 { 15659 16236 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD, 15660 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR, 15661 NS_NULL); 16237 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, 16238 NS_RS, NS_FF, NS_FI, NS_RF, NS_FR, 16239 NS_HR, NS_RH, NS_HI, NS_NULL); 15662 16240 struct neon_type_el et; 15663 16241 const char *ldconst = 0; … … 15837 16415 break; 15838 16416 16417 case NS_HI: 15839 16418 case NS_FI: /* case 10 (fconsts). */ 15840 16419 ldconst = "fconsts"; … … 15844 16423 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm); 15845 16424 do_vfp_nsyn_opcode (ldconst); 16425 16426 /* ARMv8.2 fp16 vmov.f16 instruction. */ 16427 if (rs == NS_HI) 16428 do_scalar_fp16_v82_encode (); 15846 16429 } 15847 16430 else … … 15849 16432 break; 15850 16433 16434 case NS_RH: 15851 16435 case NS_RF: /* case 12 (fmrs). */ 15852 16436 do_vfp_nsyn_opcode ("fmrs"); 16437 /* ARMv8.2 fp16 vmov.f16 instruction. */ 16438 if (rs == NS_RH) 16439 do_scalar_fp16_v82_encode (); 15853 16440 break; 15854 16441 16442 case NS_HR: 15855 16443 case NS_FR: /* case 13 (fmsr). */ 15856 16444 do_vfp_nsyn_opcode ("fmsr"); 16445 /* ARMv8.2 fp16 vmov.f16 instruction. */ 16446 if (rs == NS_HR) 16447 do_scalar_fp16_v82_encode (); 15857 16448 break; 15858 16449 … … 15911 16502 15912 16503 static void 16504 do_neon_movhf (void) 16505 { 16506 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL); 16507 constraint (rs != NS_HH, _("invalid suffix")); 16508 16509 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 16510 _(BAD_FPU)); 16511 16512 do_vfp_sp_monadic (); 16513 16514 inst.is_neon = 1; 16515 inst.instruction |= 0xf0000000; 16516 } 16517 16518 static void 15913 16519 do_neon_movl (void) 15914 16520 { … … 15970 16576 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 15971 16577 struct neon_type_el et = neon_check_type (2, rs, 15972 N_EQK | N_FLT, N_F 32 | N_U32 | N_KEY);16578 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY); 15973 16579 inst.instruction |= (et.type == NT_float) << 8; 15974 16580 neon_two_same (neon_quad (rs), 1, et.size); … … 16077 16683 inst.error = _("Use of PC here is UNPREDICTABLE"); 16078 16684 else if (warn_on_deprecated) 16079 as_ warn(_("Use of PC here is deprecated"));16685 as_tsktsk (_("Use of PC here is deprecated")); 16080 16686 } 16081 16687 … … 16086 16692 else 16087 16693 do_vfp_nsyn_opcode ("fsts"); 16694 16695 /* ARMv8.2 vldr.16/vstr.16 instruction. */ 16696 if (inst.vectype.el[0].size == 16) 16697 do_scalar_fp16_v82_encode (); 16088 16698 } 16089 16699 else … … 16171 16781 16172 16782 static int 16173 neon_alignment_bit (int size, int align, int *do_align , ...)16783 neon_alignment_bit (int size, int align, int *do_alignment, ...) 
16174 16784 { 16175 16785 va_list ap; … … 16178 16788 if (!inst.operands[1].immisalign) 16179 16789 { 16180 *do_align = 0;16790 *do_alignment = 0; 16181 16791 return SUCCESS; 16182 16792 } 16183 16793 16184 va_start (ap, do_align );16794 va_start (ap, do_alignment); 16185 16795 16186 16796 do … … 16199 16809 16200 16810 if (result == SUCCESS) 16201 *do_align = 1;16811 *do_alignment = 1; 16202 16812 else 16203 16813 first_error (_("unsupported alignment for instruction")); … … 16210 16820 { 16211 16821 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32); 16212 int align_good, do_align = 0;16822 int align_good, do_alignment = 0; 16213 16823 int logsize = neon_logbits (et.size); 16214 16824 int align = inst.operands[1].imm >> 8; … … 16230 16840 { 16231 16841 case 0: /* VLD1 / VST1. */ 16232 align_good = neon_alignment_bit (et.size, align, &do_align , 16, 16,16842 align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16, 16233 16843 32, 32, -1); 16234 16844 if (align_good == FAIL) 16235 16845 return; 16236 if (do_align )16846 if (do_alignment) 16237 16847 { 16238 16848 unsigned alignbits = 0; … … 16248 16858 16249 16859 case 1: /* VLD2 / VST2. */ 16250 align_good = neon_alignment_bit (et.size, align, &do_align , 8, 16, 16, 32,16251 16860 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16, 16861 16, 32, 32, 64, -1); 16252 16862 if (align_good == FAIL) 16253 16863 return; 16254 if (do_align )16864 if (do_alignment) 16255 16865 inst.instruction |= 1 << 4; 16256 16866 break; … … 16262 16872 16263 16873 case 3: /* VLD4 / VST4. */ 16264 align_good = neon_alignment_bit (et.size, align, &do_align , 8, 32,16874 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32, 16265 16875 16, 64, 32, 64, 32, 128, -1); 16266 16876 if (align_good == FAIL) 16267 16877 return; 16268 if (do_align )16878 if (do_alignment) 16269 16879 { 16270 16880 unsigned alignbits = 0; … … 16297 16907 { 16298 16908 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32); 16299 int align_good, do_align = 0;16909 int align_good, do_alignment = 0; 16300 16910 16301 16911 if (et.type == NT_invtype) … … 16307 16917 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2); 16308 16918 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8, 16309 &do_align , 16, 16, 32, 32, -1);16919 &do_alignment, 16, 16, 32, 32, -1); 16310 16920 if (align_good == FAIL) 16311 16921 return; … … 16321 16931 case 1: /* VLD2. */ 16322 16932 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8, 16323 &do_align, 8, 16, 16, 32, 32, 64, -1); 16933 &do_alignment, 8, 16, 16, 32, 32, 64, 16934 -1); 16324 16935 if (align_good == FAIL) 16325 16936 return; … … 16344 16955 { 16345 16956 int align = inst.operands[1].imm >> 8; 16346 align_good = neon_alignment_bit (et.size, align, &do_align , 8, 32,16957 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32, 16347 16958 16, 64, 32, 64, 32, 128, -1); 16348 16959 if (align_good == FAIL) … … 16362 16973 } 16363 16974 16364 inst.instruction |= do_align << 4;16975 inst.instruction |= do_alignment << 4; 16365 16976 } 16366 16977 … … 16435 17046 do_vfp_nsyn_fpv8 (enum neon_shape rs) 16436 17047 { 17048 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with 17049 D register operands. 
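Further up in this hunk the do_align flag became do_alignment and every neon_alignment_bit call site now spells out its permitted (element size, alignment) pairs explicitly, terminated by -1. The helper's contract is easiest to see in isolation; the sketch below mirrors its shape with simplified names and return codes and is not the tc-arm.c implementation itself:

  #include <stdarg.h>
  #include <stdio.h>

  #define OK    0
  #define FAIL  1

  /* Walk (element_size, alignment) pairs, terminated by -1, and report
     whether the requested alignment is one of the permitted combinations.  */
  static int alignment_ok (int size, int align, int *do_alignment, ...)
  {
    va_list ap;
    int result = FAIL;

    if (align == 0)              /* no :align suffix given, nothing to encode */
      {
        *do_alignment = 0;
        return OK;
      }

    va_start (ap, do_alignment);
    for (;;)
      {
        int thissize = va_arg (ap, int);
        int thisalign;

        if (thissize == -1)
          break;
        thisalign = va_arg (ap, int);
        if (size == thissize && align == thisalign)
          {
            result = OK;
            break;
          }
      }
    va_end (ap);

    if (result == OK)
      *do_alignment = 1;
    return result;
  }

  int main (void)
  {
    int do_alignment = 0;
    /* VLD1-style table: 16-bit elements allow :16, 32-bit elements :32.  */
    int r = alignment_ok (32, 32, &do_alignment, 16, 16, 32, 32, -1);
    printf ("%s, do_alignment=%d\n", r == OK ? "OK" : "FAIL", do_alignment);
    return 0;
  }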
*/ 17050 if (neon_shape_class[rs] == SC_DOUBLE) 17051 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 17052 _(BAD_FPU)); 17053 16437 17054 NEON_ENCODE (FPV8, inst); 16438 17055 16439 if (rs == NS_FFF) 16440 do_vfp_sp_dyadic (); 17056 if (rs == NS_FFF || rs == NS_HHH) 17057 { 17058 do_vfp_sp_dyadic (); 17059 17060 /* ARMv8.2 fp16 instruction. */ 17061 if (rs == NS_HHH) 17062 do_scalar_fp16_v82_encode (); 17063 } 16441 17064 else 16442 17065 do_vfp_dp_rd_rn_rm (); … … 16468 17091 return; 16469 17092 16470 neon_dyadic_misc (NT_untyped, N_F 32, 0);17093 neon_dyadic_misc (NT_untyped, N_F_16_32, 0); 16471 17094 } 16472 17095 … … 16474 17097 do_vrint_1 (enum neon_cvt_mode mode) 16475 17098 { 16476 enum neon_shape rs = neon_select_shape (NS_ FF, NS_DD, NS_QQ, NS_NULL);17099 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL); 16477 17100 struct neon_type_el et; 16478 17101 … … 16480 17103 return; 16481 17104 16482 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 17105 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with 17106 D register operands. */ 17107 if (neon_shape_class[rs] == SC_DOUBLE) 17108 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), 17109 _(BAD_FPU)); 17110 17111 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY 17112 | N_VFP); 16483 17113 if (et.type != NT_invtype) 16484 17114 { … … 16489 17119 16490 17120 NEON_ENCODE (FPV8, inst); 16491 if (rs == NS_FF )17121 if (rs == NS_FF || rs == NS_HH) 16492 17122 do_vfp_sp_monadic (); 16493 17123 else … … 16508 17138 inst.instruction |= (rs == NS_DD) << 8; 16509 17139 do_vfp_cond_or_thumb (); 17140 17141 /* ARMv8.2 fp16 vrint instruction. */ 17142 if (rs == NS_HH) 17143 do_scalar_fp16_v82_encode (); 16510 17144 } 16511 17145 else … … 16513 17147 /* Neon encodings (or something broken...). */ 16514 17148 inst.error = NULL; 16515 et = neon_check_type (2, rs, N_EQK, N_F 32 | N_KEY);17149 et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY); 16516 17150 16517 17151 if (et.type == NT_invtype) … … 16529 17163 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 16530 17164 inst.instruction |= neon_quad (rs) << 6; 17165 /* Mask off the original size bits and reencode them. */ 17166 inst.instruction = ((inst.instruction & 0xfff3ffff) 17167 | neon_logbits (et.size) << 18); 17168 16531 17169 switch (mode) 16532 17170 { … … 17093 17731 17094 17732 if (warn_on_deprecated && unified_syntax) 17095 as_ warn(_("conditional infixes are deprecated in unified syntax"));17733 as_tsktsk (_("conditional infixes are deprecated in unified syntax")); 17096 17734 affix = base + (opcode->tag - OT_odd_infix_0); 17097 17735 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2); … … 17179 17817 && (opcode->tag == OT_cinfix3 17180 17818 || opcode->tag == OT_cinfix3_deprecated)) 17181 as_ warn(_("conditional infixes are deprecated in unified syntax"));17819 as_tsktsk (_("conditional infixes are deprecated in unified syntax")); 17182 17820 17183 17821 inst.cond = cond->value; … … 17336 17974 { 17337 17975 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB) 17338 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ arch_t2))17976 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)) 17339 17977 { 17340 17978 /* Automatically generate the IT instruction. 
*/ … … 17509 18147 if (inst.instruction >= 0x10000) 17510 18148 { 17511 as_ warn(_("IT blocks containing 32-bit Thumb instructions are "18149 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are " 17512 18150 "deprecated in ARMv8")); 17513 18151 now_it.warn_deprecated = TRUE; … … 17521 18159 if ((inst.instruction & p->mask) == p->pattern) 17522 18160 { 17523 as_ warn(_("IT blocks containing 16-bit Thumb instructions "18161 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions " 17524 18162 "of the following class are deprecated in ARMv8: " 17525 18163 "%s"), p->description); … … 17534 18172 if (now_it.block_length > 1) 17535 18173 { 17536 as_ warn(_("IT blocks containing more than one conditional "18174 as_tsktsk (_("IT blocks containing more than one conditional " 17537 18175 "instruction are deprecated in ARMv8")); 17538 18176 now_it.warn_deprecated = TRUE; … … 17566 18204 17567 18205 return now_it.state != OUTSIDE_IT_BLOCK; 18206 } 18207 18208 /* Whether OPCODE only has T32 encoding. Since this function is only used by 18209 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed 18210 here, hence the "known" in the function name. */ 18211 18212 static bfd_boolean 18213 known_t32_only_insn (const struct asm_opcode *opcode) 18214 { 18215 /* Original Thumb-1 wide instruction. */ 18216 if (opcode->tencode == do_t_blx 18217 || opcode->tencode == do_t_branch23 18218 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr) 18219 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)) 18220 return TRUE; 18221 18222 /* Wide-only instruction added to ARMv8-M Baseline. */ 18223 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only) 18224 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics) 18225 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m) 18226 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div)) 18227 return TRUE; 18228 18229 return FALSE; 18230 } 18231 18232 /* Whether wide instruction variant can be used if available for a valid OPCODE 18233 in ARCH. */ 18234 18235 static bfd_boolean 18236 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode) 18237 { 18238 if (known_t32_only_insn (opcode)) 18239 return TRUE; 18240 18241 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability 18242 of variant T3 of B.W is checked in do_t_branch. */ 18243 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m) 18244 && opcode->tencode == do_t_branch) 18245 return TRUE; 18246 18247 /* Wide instruction variants of all instructions with narrow *and* wide 18248 variants become available with ARMv6t2. Other opcodes are either 18249 narrow-only or wide-only and are thus available if OPCODE is valid. */ 18250 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2)) 18251 return TRUE; 18252 18253 /* OPCODE with narrow only instruction variant or wide variant not 18254 available. 
*/ 18255 return FALSE; 17568 18256 } 17569 18257 … … 17598 18286 17599 18287 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated) 17600 as_ warn(_("s suffix on comparison instruction is deprecated"));18288 as_tsktsk (_("s suffix on comparison instruction is deprecated")); 17601 18289 17602 18290 /* The value which unconditional instructions should have in place of the … … 17617 18305 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant))) 17618 18306 { 17619 as_bad (_("selected processor does not support Thumb mode `%s'"), str);18307 as_bad (_("selected processor does not support `%s' in Thumb mode"), str); 17620 18308 return; 17621 18309 } … … 17627 18315 } 17628 18316 17629 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)) 17630 { 17631 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23 17632 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr) 17633 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier))) 18317 /* Two things are addressed here: 18318 1) Implicit require narrow instructions on Thumb-1. 18319 This avoids relaxation accidentally introducing Thumb-2 18320 instructions. 18321 2) Reject wide instructions in non Thumb-2 cores. 18322 18323 Only instructions with narrow and wide variants need to be handled 18324 but selecting all non wide-only instructions is easier. */ 18325 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) 18326 && !t32_insn_ok (variant, opcode)) 18327 { 18328 if (inst.size_req == 0) 18329 inst.size_req = 2; 18330 else if (inst.size_req == 4) 17634 18331 { 17635 /* Two things are addressed here. 17636 1) Implicit require narrow instructions on Thumb-1. 17637 This avoids relaxation accidentally introducing Thumb-2 17638 instructions. 17639 2) Reject wide instructions in non Thumb-2 cores. */ 17640 if (inst.size_req == 0) 17641 inst.size_req = 2; 17642 else if (inst.size_req == 4) 17643 { 17644 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str); 17645 return; 17646 } 18332 if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m)) 18333 as_bad (_("selected processor does not support 32bit wide " 18334 "variant of instruction `%s'"), str); 18335 else 18336 as_bad (_("selected processor does not support `%s' in " 18337 "Thumb-2 mode"), str); 18338 return; 17647 18339 } 17648 18340 } … … 17679 18371 *opcode->tvariant); 17680 18372 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly 17681 set those bits when Thumb-2 32-bit instructions are seen. ie. 17682 anything other than bl/blx and v6-M instructions. 17683 This is overly pessimistic for relaxable instructions. */ 17684 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800) 17685 || inst.relax) 17686 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr) 17687 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))) 18373 set those bits when Thumb-2 32-bit instructions are seen. The impact 18374 of relaxable instructions will be considered later after we finish all 18375 relaxation. 
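The md_assemble change above boils down to one decision per Thumb mnemonic: if the selected core has no 32-bit encoding for it, an unsized instruction is quietly forced narrow and an explicit .w becomes an error. A compressed sketch of that decision, where has_wide_encodings stands in for the combined arm_ext_v6t2/t32_insn_ok test and the diagnostics are plain strings:

  #include <stdio.h>

  /* size_req: 0 = no explicit suffix, 2 = ".n" requested, 4 = ".w" requested.  */
  static const char *pick_thumb_size (int has_wide_encodings, int *size_req)
  {
    if (!has_wide_encodings)
      {
        if (*size_req == 0)
          *size_req = 2;        /* quietly force the 16-bit encoding */
        else if (*size_req == 4)
          return "selected processor does not support the 32-bit variant";
      }
    return NULL;
  }

  int main (void)
  {
    int size_req = 0;
    const char *err = pick_thumb_size (0, &size_req);
    printf ("size=%d err=%s\n", size_req, err ? err : "(none)");
    size_req = 4;
    err = pick_thumb_size (0, &size_req);
    printf ("size=%d err=%s\n", size_req, err ? err : "(none)");
    return 0;
  }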
*/ 18376 if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any)) 18377 variant = arm_arch_none; 18378 else 18379 variant = cpu_variant; 18380 if (inst.size == 4 && !t32_insn_ok (variant, opcode)) 17688 18381 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 17689 18382 arm_ext_v6t2); … … 17708 18401 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))) 17709 18402 { 17710 as_bad (_("selected processor does not support ARM mode `%s'"), str);18403 as_bad (_("selected processor does not support `%s' in ARM mode"), str); 17711 18404 return; 17712 18405 } … … 17719 18412 inst.instruction = opcode->avalue; 17720 18413 if (opcode->tag == OT_unconditionalF) 17721 inst.instruction |= 0xF << 28;18414 inst.instruction |= 0xFU << 28; 17722 18415 else 17723 18416 inst.instruction |= inst.cond << 28; … … 18105 18798 {"epsr", 6 }, {"EPSR", 6 }, 18106 18799 {"iepsr", 7 }, {"IEPSR", 7 }, 18107 {"msp", 8 }, {"MSP", 8 }, 18108 {"psp", 9 }, {"PSP", 9 }, 18800 {"msp", 8 }, {"MSP", 8 }, {"msp_s", 8 }, {"MSP_S", 8 }, 18801 {"psp", 9 }, {"PSP", 9 }, {"psp_s", 9 }, {"PSP_S", 9 }, 18109 18802 {"primask", 16}, {"PRIMASK", 16}, 18110 18803 {"basepri", 17}, {"BASEPRI", 17}, … … 18112 18805 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */ 18113 18806 {"faultmask", 19}, {"FAULTMASK", 19}, 18114 {"control", 20}, {"CONTROL", 20} 18807 {"control", 20}, {"CONTROL", 20}, 18808 {"msp_ns", 0x88}, {"MSP_NS", 0x88}, 18809 {"psp_ns", 0x89}, {"PSP_NS", 0x89} 18115 18810 }; 18116 18811 … … 18172 18867 18173 18868 #define UL_BARRIER(L,U,CODE,FEAT) \ 18174 { L, CODE, ARM_FEATURE (FEAT, 0) }, \18175 { U, CODE, ARM_FEATURE (FEAT, 0) }18869 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \ 18870 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) } 18176 18871 18177 18872 static struct asm_barrier_opt barrier_opt_names[] = … … 18409 19104 18410 19105 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp), 18411 tC3("movs", 1b00000, _movs, 2, (RR, SH ),mov, t_mov_cmp),19106 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp), 18412 19107 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst), 18413 19108 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst), … … 18653 19348 18654 19349 #undef THUMB_VARIANT 18655 #define THUMB_VARIANT & arm_ext_v6t2 19350 #define THUMB_VARIANT & arm_ext_v6t2_v8m 18656 19351 18657 19352 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex), 18658 19353 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR), 18659 19354 strex, t_strex), 19355 #undef THUMB_VARIANT 19356 #define THUMB_VARIANT & arm_ext_v6t2 19357 18660 19358 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 18661 19359 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), … … 18685 19383 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs), 18686 19384 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs), 19385 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps), 18687 19386 18688 19387 /* ARM V6 not included in V7M (eg. integer SIMD). 
*/ 18689 19388 #undef THUMB_VARIANT 18690 19389 #define THUMB_VARIANT & arm_ext_v6_dsp 18691 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),18692 19390 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt), 18693 19391 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb), … … 18803 19501 18804 19502 #undef THUMB_VARIANT 18805 #define THUMB_VARIANT & arm_ext_v6t2 19503 #define THUMB_VARIANT & arm_ext_v6t2_v8m 18806 19504 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb), 18807 19505 rd_rn, rd_rn), … … 18829 19527 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs), 18830 19528 19529 #undef ARM_VARIANT 19530 #define ARM_VARIANT & arm_ext_pan 19531 #undef THUMB_VARIANT 19532 #define THUMB_VARIANT & arm_ext_pan 19533 19534 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan), 19535 18831 19536 #undef ARM_VARIANT 18832 19537 #define ARM_VARIANT & arm_ext_v6t2 … … 18840 19545 18841 19546 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 18842 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),18843 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),18844 19547 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit), 18845 19548 … … 18848 19551 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt), 18849 19552 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt), 19553 19554 #undef THUMB_VARIANT 19555 #define THUMB_VARIANT & arm_ext_v6t2_v8m 19556 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16), 19557 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16), 18850 19558 18851 19559 /* Thumb-only instructions. */ … … 18860 19568 #undef ARM_VARIANT 18861 19569 #define ARM_VARIANT & arm_ext_v1 19570 #undef THUMB_VARIANT 19571 #define THUMB_VARIANT & arm_ext_v6t2 18862 19572 18863 19573 TUE("it", bf08, bf08, 1, (COND), it, t_it), … … 18929 19639 #undef ARM_VARIANT 18930 19640 #define ARM_VARIANT & arm_ext_v8 19641 19642 /* Instructions shared between armv8-a and armv8-m. 
*/ 18931 19643 #undef THUMB_VARIANT 18932 #define THUMB_VARIANT & arm_ext_v8 18933 18934 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint), 18935 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt), 18936 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 18937 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb), 18938 ldrexd, t_ldrexd), 18939 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn), 18940 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 18941 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb), 18942 stlex, t_stlex), 18943 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), 18944 strexd, t_strexd), 18945 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb), 18946 stlex, t_stlex), 18947 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb), 18948 stlex, t_stlex), 19644 #define THUMB_VARIANT & arm_ext_atomics 19645 18949 19646 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 18950 19647 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), … … 18953 19650 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn), 18954 19651 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn), 18955 19652 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 19653 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn), 19654 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 19655 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb), 19656 stlex, t_stlex), 19657 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb), 19658 stlex, t_stlex), 19659 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb), 19660 stlex, t_stlex), 19661 #undef THUMB_VARIANT 19662 #define THUMB_VARIANT & arm_ext_v8 19663 19664 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint), 19665 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt), 19666 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb), 19667 ldrexd, t_ldrexd), 19668 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), 19669 strexd, t_strexd), 18956 19670 /* ARMv8 T32 only. */ 18957 19671 #undef ARM_VARIANT … … 18963 19677 /* FP for ARMv8. */ 18964 19678 #undef ARM_VARIANT 18965 #define ARM_VARIANT & fpu_vfp_ext_armv8 19679 #define ARM_VARIANT & fpu_vfp_ext_armv8xd 18966 19680 #undef THUMB_VARIANT 18967 #define THUMB_VARIANT & fpu_vfp_ext_armv8 19681 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd 18968 19682 18969 19683 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel), … … 19016 19730 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch), 19017 19731 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw), 19732 19733 /* ARMv8.2 RAS extension. */ 19734 #undef ARM_VARIANT 19735 #define ARM_VARIANT & arm_ext_ras 19736 #undef THUMB_VARIANT 19737 #define THUMB_VARIANT & arm_ext_ras 19738 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs), 19018 19739 19019 19740 #undef ARM_VARIANT … … 19632 20353 NCE(vmov, 0, 1, (VMOV), neon_mov), 19633 20354 NCE(vmovq, 0, 1, (VMOV), neon_mov), 20355 20356 #undef ARM_VARIANT 20357 #define ARM_VARIANT & arm_ext_fp16 20358 #undef THUMB_VARIANT 20359 #define THUMB_VARIANT & arm_ext_fp16 20360 /* New instructions added from v8.2, allowing the extraction and insertion of 20361 the upper 16 bits of a 32-bit vector register. 
*/ 20362 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf), 20363 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf), 19634 20364 19635 20365 #undef THUMB_VARIANT … … 19682 20412 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 19683 20413 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 19684 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F 32. */20414 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */ 19685 20415 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 19686 20416 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), … … 19734 20464 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 19735 20465 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step), 20466 /* ARM v8.1 extension. */ 20467 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah), 20468 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah), 20469 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah), 20470 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah), 19736 20471 19737 20472 /* Two address, int/float. Types S8 S16 S32 F32. */ … … 19840 20575 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long), 19841 20576 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long), 19842 /* Reciprocal estimates. Types U32F32. */20577 /* Reciprocal estimates. Types U32 F16 F32. */ 19843 20578 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est), 19844 20579 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est), … … 20246 20981 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 20247 20982 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 20983 20984 /* ARMv8-M instructions. */ 20985 #undef ARM_VARIANT 20986 #define ARM_VARIANT NULL 20987 #undef THUMB_VARIANT 20988 #define THUMB_VARIANT & arm_ext_v8m 20989 TUE("sg", 0, e97fe97f, 0, (), 0, noargs), 20990 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx), 20991 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx), 20992 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt), 20993 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt), 20994 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt), 20995 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt), 20996 20997 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the 20998 instructions behave as nop if no VFP is present. */ 20999 #undef THUMB_VARIANT 21000 #define THUMB_VARIANT & arm_ext_v8m_main 21001 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn), 21002 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn), 20248 21003 }; 20249 21004 #undef ARM_VARIANT … … 20519 21274 fixp->fx_line = fragp->fr_line; 20520 21275 fragp->fr_fix += fragp->fr_var; 21276 21277 /* Set whether we use thumb-2 ISA based on final relaxation results. 
*/ 21278 if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected () 21279 && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2)) 21280 ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2); 20521 21281 } 20522 21282 … … 20796 21556 20797 21557 align = bfd_get_section_alignment (stdoutput, segment); 20798 size = ((size + (1 << align) - 1) & ( (valueT) -1 << align));21558 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align))); 20799 21559 } 20800 21560 #endif … … 20809 21569 arm_handle_align (fragS * fragP) 20810 21570 { 20811 static char const arm_noop[2][2][4] =21571 static unsigned char const arm_noop[2][2][4] = 20812 21572 { 20813 21573 { /* ARMv1 */ … … 20820 21580 }, 20821 21581 }; 20822 static char const thumb_noop[2][2][2] =21582 static unsigned char const thumb_noop[2][2][2] = 20823 21583 { 20824 21584 { /* Thumb-1 */ … … 20831 21591 } 20832 21592 }; 20833 static char const wide_thumb_noop[2][4] =21593 static unsigned char const wide_thumb_noop[2][4] = 20834 21594 { /* Wide Thumb-2 */ 20835 21595 {0xaf, 0xf3, 0x00, 0x80}, /* LE */ … … 20839 21599 unsigned bytes, fix, noop_size; 20840 21600 char * p; 20841 const char * noop;20842 const char *narrow_noop = NULL;21601 const unsigned char * noop; 21602 const unsigned char *narrow_noop = NULL; 20843 21603 #ifdef OBJ_ELF 20844 21604 enum mstate state; … … 20971 21731 arm_init_frag (fragS * fragP, int max_chars) 20972 21732 { 21733 int frag_thumb_mode; 21734 20973 21735 /* If the current ARM vs THUMB mode has not already 20974 21736 been recorded into this frag then do so now. */ 20975 21737 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0) 20976 {20977 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED; 20978 20979 /* Record a mapping symbol for alignment frags. We will delete this 20980 later if the alignment ends up empty. */ 20981 switch (fragP->fr_type)20982 { 20983 case rs_align: 20984 case rs_align_test:20985 case rs_fill:20986 mapping_state_2 (MAP_DATA, max_chars); 20987 break;20988 case rs_align_code: 20989 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars); 20990 break;20991 default: 20992 break; 20993 } 21738 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED; 21739 21740 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED; 21741 21742 /* Record a mapping symbol for alignment frags. We will delete this 21743 later if the alignment ends up empty. */ 21744 switch (fragP->fr_type) 21745 { 21746 case rs_align: 21747 case rs_align_test: 21748 case rs_fill: 21749 mapping_state_2 (MAP_DATA, max_chars); 21750 break; 21751 case rs_align_code: 21752 mapping_state_2 (frag_thumb_mode ? 
MAP_THUMB : MAP_ARM, max_chars); 21753 break; 21754 default: 21755 break; 20994 21756 } 20995 21757 } … … 21050 21812 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE; 21051 21813 if (unwind.opcodes) 21052 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,21053 21814 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes, 21815 unwind.opcode_alloc); 21054 21816 else 21055 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);21817 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc); 21056 21818 } 21057 21819 while (length > 0) … … 21157 21919 const char * prefix_once; 21158 21920 const char * group_name; 21159 size_t prefix_len;21160 size_t text_len;21161 21921 char * sec_name; 21162 size_t sec_name_len;21163 21922 int type; 21164 21923 int flags; … … 21189 21948 } 21190 21949 21191 prefix_len = strlen (prefix); 21192 text_len = strlen (text_name); 21193 sec_name_len = prefix_len + text_len; 21194 sec_name = (char *) xmalloc (sec_name_len + 1); 21195 memcpy (sec_name, prefix, prefix_len); 21196 memcpy (sec_name + prefix_len, text_name, text_len); 21197 sec_name[prefix_len + text_len] = '\0'; 21950 sec_name = concat (prefix, text_name, (char *) NULL); 21198 21951 21199 21952 flags = SHF_ALLOC; … … 21565 22318 return base; 21566 22319 } 22320 } 22321 22322 static bfd_boolean flag_warn_syms = TRUE; 22323 22324 bfd_boolean 22325 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name) 22326 { 22327 /* PR 18347 - Warn if the user attempts to create a symbol with the same 22328 name as an ARM instruction. Whilst strictly speaking it is allowed, it 22329 does mean that the resulting code might be very confusing to the reader. 22330 Also this warning can be triggered if the user omits an operand before 22331 an immediate address, eg: 22332 22333 LDR =foo 22334 22335 GAS treats this as an assignment of the value of the symbol foo to a 22336 symbol LDR, and so (without this code) it will not issue any kind of 22337 warning or error message. 22338 22339 Note - ARM instructions are case-insensitive but the strings in the hash 22340 table are all stored in lower case, so we must first ensure that name is 22341 lower case too. */ 22342 if (flag_warn_syms && arm_ops_hsh) 22343 { 22344 char * nbuf = strdup (name); 22345 char * p; 22346 22347 for (p = nbuf; *p; p++) 22348 *p = TOLOWER (*p); 22349 if (hash_find (arm_ops_hsh, nbuf) != NULL) 22350 { 22351 static struct hash_control * already_warned = NULL; 22352 22353 if (already_warned == NULL) 22354 already_warned = hash_new (); 22355 /* Only warn about the symbol once. To keep the code 22356 simple we let hash_insert do the lookup for us. 
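The new arm_tc_equal_in_insn hook exists for exactly the situation described in the comment above: a line such as "LDR =foo" silently defines a symbol named LDR instead of assembling anything. The sketch below reproduces the two ingredients, case-folding the name before the lookup and warning only once per offender, with a fixed mnemonic array and a small cache standing in for gas's hash tables:

  #include <ctype.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* A few mnemonics, lower case and sorted, standing in for the opcode hash.  */
  static const char *const mnemonics[] = { "add", "b", "ldr", "mov", "str" };

  static int cmp_str (const void *key, const void *elem)
  {
    return strcmp ((const char *) key, *(const char *const *) elem);
  }

  /* Warn (once per name) when a symbol is spelt like an instruction.  */
  static void check_symbol_name (const char *name)
  {
    static char warned[16][32];
    static int nwarned;
    char buf[32];
    size_t i;

    for (i = 0; name[i] && i < sizeof buf - 1; i++)
      buf[i] = tolower ((unsigned char) name[i]);
    buf[i] = '\0';

    if (!bsearch (buf, mnemonics, sizeof mnemonics / sizeof *mnemonics,
                  sizeof *mnemonics, cmp_str))
      return;

    for (i = 0; i < (size_t) nwarned; i++)
      if (strcmp (warned[i], buf) == 0)
        return;                        /* already reported this one */
    if (nwarned < 16)
      strcpy (warned[nwarned++], buf);
    fprintf (stderr, "warning: symbol matches an instruction name: %s\n", name);
  }

  int main (void)
  {
    check_symbol_name ("LDR");   /* warns */
    check_symbol_name ("ldr");   /* suppressed, already warned */
    check_symbol_name ("foo");   /* silent */
    return 0;
  }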
*/ 22357 if (hash_insert (already_warned, name, NULL) == NULL) 22358 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name); 22359 } 22360 else 22361 free (nbuf); 22362 } 22363 22364 return FALSE; 21567 22365 } 21568 22366 … … 22592 23390 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff)) 22593 23391 { 22594 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ arch_t2)))23392 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))) 22595 23393 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE); 22596 23394 else if ((value & ~0x1ffffff) … … 22691 23489 case BFD_RELOC_ARM_CP_OFF_IMM: 22692 23490 case BFD_RELOC_ARM_T32_CP_OFF_IMM: 22693 if (value < -1023 || value > 1023 || (value & 3)) 23491 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM) 23492 newval = md_chars_to_number (buf, INSN_SIZE); 23493 else 23494 newval = get_thumb32_insn (buf); 23495 if ((newval & 0x0f200f00) == 0x0d000900) 23496 { 23497 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic 23498 has permitted values that are multiples of 2, in the range 0 23499 to 510. */ 23500 if (value < -510 || value > 510 || (value & 1)) 23501 as_bad_where (fixP->fx_file, fixP->fx_line, 23502 _("co-processor offset out of range")); 23503 } 23504 else if (value < -1023 || value > 1023 || (value & 3)) 22694 23505 as_bad_where (fixP->fx_file, fixP->fx_line, 22695 23506 _("co-processor offset out of range")); … … 22708 23519 { 22709 23520 newval &= 0xff7fff00; 23521 if ((newval & 0x0f200f00) == 0x0d000900) 23522 { 23523 /* This is a fp16 vstr/vldr. 23524 23525 It requires the immediate offset in the instruction is shifted 23526 left by 1 to be a half-word offset. 23527 23528 Here, left shift by 1 first, and later right shift by 2 23529 should get the right offset. */ 23530 value <<= 1; 23531 } 22710 23532 newval |= (value >> 2) | (sign ? INDEX_UP : 0); 22711 23533 } … … 22833 23655 if (rd == REG_SP) 22834 23656 { 22835 if (value & ~0x1fc)23657 if (value & ~0x1fc) 22836 23658 as_bad_where (fixP->fx_file, fixP->fx_line, 22837 23659 _("invalid immediate for stack address calculation")); … … 22841 23663 else if (rs == REG_PC || rs == REG_SP) 22842 23664 { 23665 /* PR gas/18541. If the addition is for a defined symbol 23666 within range of an ADR instruction then accept it. */ 23667 if (subtract 23668 && value == 4 23669 && fixP->fx_addsy != NULL) 23670 { 23671 subtract = 0; 23672 23673 if (! S_IS_DEFINED (fixP->fx_addsy) 23674 || S_GET_SEGMENT (fixP->fx_addsy) != seg 23675 || S_IS_WEAK (fixP->fx_addsy)) 23676 { 23677 as_bad_where (fixP->fx_file, fixP->fx_line, 23678 _("address calculation needs a strongly defined nearby symbol")); 23679 } 23680 else 23681 { 23682 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address; 23683 23684 /* Round up to the next 4-byte boundary. */ 23685 if (v & 3) 23686 v = (v + 3) & ~ 3; 23687 else 23688 v += 4; 23689 v = S_GET_VALUE (fixP->fx_addsy) - v; 23690 23691 if (v & ~0x3fc) 23692 { 23693 as_bad_where (fixP->fx_file, fixP->fx_line, 23694 _("symbol too far away")); 23695 } 23696 else 23697 { 23698 fixP->fx_done = 1; 23699 value = v; 23700 } 23701 } 23702 } 23703 22843 23704 if (subtract || value & ~0x3fc) 22844 23705 as_bad_where (fixP->fx_file, fixP->fx_line, 22845 23706 _("invalid immediate for address calculation (value = 0x%08lX)"), 22846 (unsigned long) value);23707 (unsigned long) (subtract ? - value : value)); 22847 23708 newval = (rs == REG_PC ? 
T_OPCODE_ADD_PC : T_OPCODE_ADD_SP); 22848 23709 newval |= rd << 8; … … 22942 23803 return; 22943 23804 23805 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC: 23806 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC: 23807 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC: 23808 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC: 23809 gas_assert (!fixP->fx_done); 23810 { 23811 bfd_vma insn; 23812 bfd_boolean is_mov; 23813 bfd_vma encoded_addend = value; 23814 23815 /* Check that addend can be encoded in instruction. */ 23816 if (!seg->use_rela_p && (value < 0 || value > 255)) 23817 as_bad_where (fixP->fx_file, fixP->fx_line, 23818 _("the offset 0x%08lX is not representable"), 23819 (unsigned long) encoded_addend); 23820 23821 /* Extract the instruction. */ 23822 insn = md_chars_to_number (buf, THUMB_SIZE); 23823 is_mov = (insn & 0xf800) == 0x2000; 23824 23825 /* Encode insn. */ 23826 if (is_mov) 23827 { 23828 if (!seg->use_rela_p) 23829 insn |= encoded_addend; 23830 } 23831 else 23832 { 23833 int rd, rs; 23834 23835 /* Extract the instruction. */ 23836 /* Encoding is the following 23837 0x8000 SUB 23838 0x00F0 Rd 23839 0x000F Rs 23840 */ 23841 /* The following conditions must be true : 23842 - ADD 23843 - Rd == Rs 23844 - Rd <= 7 23845 */ 23846 rd = (insn >> 4) & 0xf; 23847 rs = insn & 0xf; 23848 if ((insn & 0x8000) || (rd != rs) || rd > 7) 23849 as_bad_where (fixP->fx_file, fixP->fx_line, 23850 _("Unable to process relocation for thumb opcode: %lx"), 23851 (unsigned long) insn); 23852 23853 /* Encode as ADD immediate8 thumb 1 code. */ 23854 insn = 0x3000 | (rd << 8); 23855 23856 /* Place the encoded addend into the first 8 bits of the 23857 instruction. */ 23858 if (!seg->use_rela_p) 23859 insn |= encoded_addend; 23860 } 23861 23862 /* Update the instruction. */ 23863 md_number_to_chars (buf, insn, THUMB_SIZE); 23864 } 23865 break; 23866 22944 23867 case BFD_RELOC_ARM_ALU_PC_G0_NC: 22945 23868 case BFD_RELOC_ARM_ALU_PC_G0: … … 23132 24055 bfd_reloc_code_real_type code; 23133 24056 23134 reloc = (arelent *) xmalloc (sizeof (arelent));23135 23136 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));24057 reloc = XNEW (arelent); 24058 24059 reloc->sym_ptr_ptr = XNEW (asymbol *); 23137 24060 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 23138 24061 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; … … 23247 24170 case BFD_RELOC_ARM_PREL31: 23248 24171 case BFD_RELOC_ARM_TARGET2: 23249 case BFD_RELOC_ARM_TLS_LE32:23250 24172 case BFD_RELOC_ARM_TLS_LDO32: 23251 24173 case BFD_RELOC_ARM_PCREL_CALL: … … 23280 24202 case BFD_RELOC_ARM_LDC_SB_G2: 23281 24203 case BFD_RELOC_ARM_V4BX: 24204 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC: 24205 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC: 24206 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC: 24207 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC: 23282 24208 code = fixp->fx_r_type; 23283 24209 break; … … 23285 24211 case BFD_RELOC_ARM_TLS_GOTDESC: 23286 24212 case BFD_RELOC_ARM_TLS_GD32: 24213 case BFD_RELOC_ARM_TLS_LE32: 23287 24214 case BFD_RELOC_ARM_TLS_IE32: 23288 24215 case BFD_RELOC_ARM_TLS_LDM32: … … 23328 24255 default: 23329 24256 { 23330 c har * type;24257 const char * type; 23331 24258 23332 24259 switch (fixp->fx_r_type) … … 23581 24508 return FALSE; 23582 24509 24510 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited 24511 offsets, so keep these symbols. 
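For the new BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations the fixup code above either patches the addend straight into a 16-bit mov or rewrites the matched add into its 8-bit-immediate form. A standalone sketch of that re-encoding step; the second input halfword in main is fabricated purely to exercise the rd == rs path, and error handling is reduced to a zero return:

  #include <stdio.h>

  /* Fold an 8-bit addend into a Thumb-1 "mov rd, #imm8" (0x2000 family) or
     rewrite the matched add form as "adds rd, #imm8".  Returns the new
     halfword, or 0 on an operand or range problem.  */
  static unsigned encode_alu_abs (unsigned insn, unsigned addend)
  {
    if (addend > 255)
      return 0;                                  /* not representable */

    if ((insn & 0xf800) == 0x2000)               /* mov rd, #imm8 */
      return insn | addend;

    {
      unsigned rd = (insn >> 4) & 0xf;
      unsigned rs = insn & 0xf;

      if ((insn & 0x8000) != 0 || rd != rs || rd > 7)
        return 0;                                /* shape we cannot rewrite */
      return 0x3000 | (rd << 8) | addend;        /* adds rd, #imm8 */
    }
  }

  int main (void)
  {
    printf ("%#x\n", encode_alu_abs (0x2100, 0x42));   /* mov r1, #0x42 -> 0x2142 */
    printf ("%#x\n", encode_alu_abs (0x0033, 0x10));   /* rd == rs == r3 -> 0x3310 */
    return 0;
  }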
*/ 24512 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC 24513 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) 24514 return FALSE; 24515 23583 24516 return TRUE; 23584 24517 } … … 23586 24519 23587 24520 #ifdef OBJ_ELF 23588 23589 24521 const char * 23590 24522 elf32_arm_target_format (void) … … 23772 24704 otherwise tag label as .code 16. */ 23773 24705 if (THUMB_IS_FUNC (sym)) 23774 elf_sym->internal_elf_sym.st_target_internal23775 = ST_BRANCH_TO_THUMB;24706 ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal, 24707 ST_BRANCH_TO_THUMB); 23776 24708 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4) 23777 24709 elf_sym->internal_elf_sym.st_info = … … 23911 24843 selected_cpu = cpu_default; 23912 24844 } 24845 else if (no_cpu_selected ()) 24846 selected_cpu = cpu_default; 23913 24847 #else 23914 24848 if (mcpu_cpu_opt) … … 24058 24992 24059 24993 -m[no-]warn-deprecated Warn about deprecated features 24994 -m[no-]warn-syms Warn when symbols match instructions 24060 24995 24061 24996 For now we will also provide support for: … … 24126 25061 }; 24127 25062 25063 24128 25064 size_t md_longopts_size = sizeof (md_longopts); 24129 25065 24130 25066 struct arm_option_table 24131 25067 { 24132 c har *option; /* Option name to match. */24133 c har *help; /* Help information. */25068 const char *option; /* Option name to match. */ 25069 const char *help; /* Help information. */ 24134 25070 int *var; /* Variable to change. */ 24135 25071 int value; /* What to change it to. */ 24136 c har *deprecated; /* If non-null, print this message. */25072 const char *deprecated; /* If non-null, print this message. */ 24137 25073 }; 24138 25074 … … 24160 25096 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"), 24161 25097 &warn_on_deprecated, 0, NULL}, 25098 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL}, 25099 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL}, 24162 25100 {NULL, NULL, NULL, 0, NULL} 24163 25101 }; … … 24165 25103 struct arm_legacy_option_table 24166 25104 { 24167 c har *option; /* Option name to match. */25105 const char *option; /* Option name to match. */ 24168 25106 const arm_feature_set **var; /* Variable to change. */ 24169 25107 const arm_feature_set value; /* What to change it to. */ 24170 c har *deprecated; /* If non-null, print this message. */25108 const char *deprecated; /* If non-null, print this message. 
*/ 24171 25109 }; 24172 25110 … … 24286 25224 struct arm_cpu_option_table 24287 25225 { 24288 c har *name;25226 const char *name; 24289 25227 size_t name_len; 24290 25228 const arm_feature_set value; … … 24386 25324 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL), 24387 25325 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL), 24388 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6 ZK, FPU_NONE, NULL),24389 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6 ZK, FPU_ARCH_VFP_V2, NULL),25326 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ, FPU_NONE, NULL), 25327 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ, FPU_ARCH_VFP_V2, NULL), 24390 25328 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC, 24391 25329 FPU_NONE, "Cortex-A5"), … … 24393 25331 "Cortex-A7"), 24394 25332 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC, 24395 ARM_FEATURE (0,FPU_VFP_V325333 ARM_FEATURE_COPROC (FPU_VFP_V3 24396 25334 | FPU_NEON_EXT_V1), 24397 25335 "Cortex-A8"), 24398 25336 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC, 24399 ARM_FEATURE (0,FPU_VFP_V325337 ARM_FEATURE_COPROC (FPU_VFP_V3 24400 25338 | FPU_NEON_EXT_V1), 24401 25339 "Cortex-A9"), … … 24406 25344 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4, 24407 25345 "Cortex-A17"), 25346 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25347 "Cortex-A32"), 25348 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25349 "Cortex-A35"), 24408 25350 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 24409 25351 "Cortex-A53"), 24410 25352 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 24411 25353 "Cortex-A57"), 25354 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25355 "Cortex-A72"), 25356 ARM_CPU_OPT ("cortex-a73", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25357 "Cortex-A73"), 24412 25358 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"), 24413 25359 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16, … … 24418 25364 FPU_ARCH_VFP_V3D16, 24419 25365 "Cortex-R7"), 25366 ARM_CPU_OPT ("cortex-r8", ARM_ARCH_V7R_IDIV, 25367 FPU_ARCH_VFP_V3D16, 25368 "Cortex-R8"), 25369 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M7"), 24420 25370 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"), 24421 25371 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"), … … 24423 25373 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"), 24424 25374 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"), 25375 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25376 "Samsung " \ 25377 "Exynos M1"), 25378 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25379 "Qualcomm " 25380 "QDF24XX"), 25381 24425 25382 /* ??? XSCALE is really an architecture. */ 24426 25383 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL), … … 24430 25387 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL), 24431 25388 /* Maverick */ 24432 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),25389 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), 24433 25390 FPU_ARCH_MAVERICK, "ARM920T"), 24434 25391 /* Marvell processors. 
*/ 24435 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0), 25392 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP 25393 | ARM_EXT_SEC, 25394 ARM_EXT2_V6T2_V8M), 24436 25395 FPU_ARCH_VFP_V3D16, NULL), 25396 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP 25397 | ARM_EXT_SEC, 25398 ARM_EXT2_V6T2_V8M), 25399 FPU_ARCH_NEON_VFP_V4, NULL), 25400 /* APM X-Gene family. */ 25401 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25402 "APM X-Gene 1"), 25403 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 25404 "APM X-Gene 2"), 24437 25405 24438 25406 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL } … … 24442 25410 struct arm_arch_option_table 24443 25411 { 24444 c har *name;25412 const char *name; 24445 25413 size_t name_len; 24446 25414 const arm_feature_set value; … … 24474 25442 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP), 24475 25443 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP), 24476 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP), 25444 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is 25445 kept to preserve existing behaviour. */ 25446 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP), 25447 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP), 24477 25448 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP), 24478 25449 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP), 24479 25450 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP), 24480 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP), 25451 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is 25452 kept to preserve existing behaviour. */ 25453 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP), 25454 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP), 24481 25455 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP), 24482 25456 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP), … … 24492 25466 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP), 24493 25467 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP), 25468 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP), 25469 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP), 24494 25470 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP), 25471 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP), 25472 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP), 24495 25473 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP), 24496 25474 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP), … … 24503 25481 struct arm_option_extension_value_table 24504 25482 { 24505 c har *name;25483 const char *name; 24506 25484 size_t name_len; 24507 const arm_feature_set value; 24508 const arm_feature_set allowed_archs; 25485 const arm_feature_set merge_value; 25486 const arm_feature_set clear_value; 25487 /* List of architectures for which an extension is available. ARM_ARCH_NONE 25488 indicates that an extension is available for all architectures while 25489 ARM_ANY marks an empty entry. */ 25490 const arm_feature_set allowed_archs[2]; 24509 25491 }; 24510 25492 24511 25493 /* The following table must be in alphabetical order with a NULL last entry. 
24512 25494 */ 24513 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA } 25495 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } } 25496 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} } 24514 25497 static const struct arm_option_extension_value_table arm_extensions[] = 24515 25498 { 24516 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)), 25499 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8), 25500 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 24517 25501 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8, 24518 ARM_FEATURE (ARM_EXT_V8, 0)), 24519 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, 24520 ARM_FEATURE (ARM_EXT_V8, 0)), 24521 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0), 24522 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)), 24523 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY), 24524 ARM_EXT_OPT ("iwmmxt2", 24525 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY), 24526 ARM_EXT_OPT ("maverick", 24527 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY), 24528 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0), 24529 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)), 24530 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8, 24531 ARM_FEATURE (ARM_EXT_V8, 0)), 24532 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0), 24533 ARM_FEATURE (ARM_EXT_V6M, 0)), 24534 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0), 24535 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)), 24536 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV 24537 | ARM_EXT_DIV, 0), 24538 ARM_FEATURE (ARM_EXT_V7A, 0)), 24539 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY), 24540 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE } 25502 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8), 25503 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25504 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP), 25505 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP), 25506 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)), 25507 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8), 25508 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25509 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST), 25510 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST), 25511 ARM_ARCH_V8_2A), 25512 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV), 25513 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV), 25514 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A), 25515 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)), 25516 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), 25517 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE), 25518 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), 25519 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE), 25520 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), 25521 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE), 25522 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 25523 ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 25524 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A), 25525 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)), 25526 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS), 25527 ARM_FEATURE_CORE_LOW (ARM_EXT_OS), 25528 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)), 25529 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN), 25530 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0), 25531 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25532 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS), 25533 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0), 25534 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25535 ARM_EXT_OPT ("rdma", 
FPU_ARCH_NEON_VFP_ARMV8_1, 25536 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA), 25537 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25538 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC), 25539 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC), 25540 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 25541 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)), 25542 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8, 25543 ARM_FEATURE_COPROC (FPU_NEON_ARMV8), 25544 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)), 25545 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV 25546 | ARM_EXT_DIV), 25547 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 25548 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)), 25549 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), 25550 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE), 25551 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } } 24541 25552 }; 24542 25553 #undef ARM_EXT_OPT … … 24545 25556 struct arm_option_fpu_value_table 24546 25557 { 24547 c har *name;25558 const char *name; 24548 25559 const arm_feature_set value; 24549 25560 }; … … 24586 25597 {"vfpv4-d16", FPU_ARCH_VFP_V4D16}, 24587 25598 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16}, 25599 {"fpv5-d16", FPU_ARCH_VFP_V5D16}, 25600 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16}, 24588 25601 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4}, 24589 25602 {"fp-armv8", FPU_ARCH_VFP_ARMV8}, … … 24591 25604 {"crypto-neon-fp-armv8", 24592 25605 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8}, 25606 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1}, 25607 {"crypto-neon-fp-armv8.1", 25608 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1}, 24593 25609 {NULL, ARM_ARCH_NONE} 24594 25610 }; … … 24596 25612 struct arm_option_value_table 24597 25613 { 24598 c har *name;25614 const char *name; 24599 25615 long value; 24600 25616 }; … … 24621 25637 struct arm_long_option_table 24622 25638 { 24623 c har * option; /* Substring to match. */24624 c har * help; /* Help information. */24625 int (* func) (c har * subopt); /* Function to decode sub-option. */24626 c har * deprecated; /* If non-null, print this message. */25639 const char * option; /* Substring to match. */ 25640 const char * help; /* Help information. */ 25641 int (* func) (const char * subopt); /* Function to decode sub-option. */ 25642 const char * deprecated; /* If non-null, print this message. */ 24627 25643 }; 24628 25644 24629 25645 static bfd_boolean 24630 arm_parse_extension (char *str, const arm_feature_set **opt_p) 24631 { 24632 arm_feature_set *ext_set = (arm_feature_set *) 24633 xmalloc (sizeof (arm_feature_set)); 25646 arm_parse_extension (const char *str, const arm_feature_set **opt_p) 25647 { 25648 arm_feature_set *ext_set = XNEW (arm_feature_set); 24634 25649 24635 25650 /* We insist on extensions being specified in alphabetical order, and with … … 24640 25655 -1 -> 1 -> 0. */ 24641 25656 const struct arm_option_extension_value_table * opt = NULL; 25657 const arm_feature_set arm_any = ARM_ANY; 24642 25658 int adding_value = -1; 24643 25659 … … 24648 25664 while (str != NULL && *str != 0) 24649 25665 { 24650 c har *ext;25666 const char *ext; 24651 25667 size_t len; 24652 25668 … … 24704 25720 if (opt->name_len == len && strncmp (opt->name, str, len) == 0) 24705 25721 { 25722 int i, nb_allowed_archs = 25723 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]); 24706 25724 /* Check we can apply the extension to this architecture. */ 24707 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs)) 25725 for (i = 0; i < nb_allowed_archs; i++) 25726 { 25727 /* Empty entry. 
*/ 25728 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any)) 25729 continue; 25730 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set)) 25731 break; 25732 } 25733 if (i == nb_allowed_archs) 24708 25734 { 24709 25735 as_bad (_("extension does not apply to the base architecture")); … … 24713 25739 /* Add or remove the extension. */ 24714 25740 if (adding_value) 24715 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt-> value);25741 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value); 24716 25742 else 24717 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt-> value);25743 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value); 24718 25744 24719 25745 break; … … 24751 25777 24752 25778 static bfd_boolean 24753 arm_parse_cpu (c har *str)25779 arm_parse_cpu (const char *str) 24754 25780 { 24755 25781 const struct arm_cpu_option_table *opt; 24756 c har *ext = strchr (str, '+');25782 const char *ext = strchr (str, '+'); 24757 25783 size_t len; 24758 25784 … … 24774 25800 mcpu_fpu_opt = &opt->default_fpu; 24775 25801 if (opt->canonical_name) 24776 strcpy (selected_cpu_name, opt->canonical_name); 25802 { 25803 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name)); 25804 strcpy (selected_cpu_name, opt->canonical_name); 25805 } 24777 25806 else 24778 25807 { 24779 25808 size_t i; 25809 25810 if (len >= sizeof selected_cpu_name) 25811 len = (sizeof selected_cpu_name) - 1; 24780 25812 24781 25813 for (i = 0; i < len; i++) … … 24795 25827 24796 25828 static bfd_boolean 24797 arm_parse_arch (c har *str)25829 arm_parse_arch (const char *str) 24798 25830 { 24799 25831 const struct arm_arch_option_table *opt; 24800 c har *ext = strchr (str, '+');25832 const char *ext = strchr (str, '+'); 24801 25833 size_t len; 24802 25834 … … 24830 25862 24831 25863 static bfd_boolean 24832 arm_parse_fpu (c har * str)25864 arm_parse_fpu (const char * str) 24833 25865 { 24834 25866 const struct arm_option_fpu_value_table * opt; … … 24846 25878 24847 25879 static bfd_boolean 24848 arm_parse_float_abi (c har * str)25880 arm_parse_float_abi (const char * str) 24849 25881 { 24850 25882 const struct arm_option_value_table * opt; … … 24863 25895 #ifdef OBJ_ELF 24864 25896 static bfd_boolean 24865 arm_parse_eabi (c har * str)25897 arm_parse_eabi (const char * str) 24866 25898 { 24867 25899 const struct arm_option_value_table *opt; … … 24879 25911 24880 25912 static bfd_boolean 24881 arm_parse_it_mode (c har * str)25913 arm_parse_it_mode (const char * str) 24882 25914 { 24883 25915 bfd_boolean ret = TRUE; … … 24902 25934 24903 25935 static bfd_boolean 24904 arm_ccs_mode (c har * unused ATTRIBUTE_UNUSED)25936 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED) 24905 25937 { 24906 25938 codecomposer_syntax = TRUE; … … 24932 25964 24933 25965 int 24934 md_parse_option (int c, c har * arg)25966 md_parse_option (int c, const char * arg) 24935 25967 { 24936 25968 struct arm_option_table *opt; … … 25060 26092 } cpu_arch_ver_table; 25061 26093 25062 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted 25063 least features first. */ 26094 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table 26095 must be sorted least features first but some reordering is needed, eg. for 26096 Thumb-2 instructions to be detected as coming from ARMv6T2. 
*/ 25064 26097 static const cpu_arch_ver_table cpu_arch_ver[] = 25065 26098 { … … 25080 26113 {10, ARM_ARCH_V7M}, 25081 26114 {14, ARM_ARCH_V8A}, 26115 {16, ARM_ARCH_V8M_BASE}, 26116 {17, ARM_ARCH_V8M_MAIN}, 25082 26117 {0, ARM_ARCH_NONE} 25083 26118 }; … … 25103 26138 25104 26139 /* Set the public EABI object attributes. */ 25105 staticvoid26140 void 25106 26141 aeabi_set_public_attributes (void) 25107 26142 { … … 25110 26145 int virt_sec = 0; 25111 26146 int fp16_optional = 0; 26147 arm_feature_set arm_arch = ARM_ARCH_NONE; 25112 26148 arm_feature_set flags; 25113 26149 arm_feature_set tmp; 26150 arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE; 25114 26151 const cpu_arch_ver_table *p; 25115 26152 … … 25148 26185 { 25149 26186 arch = p->val; 26187 arm_arch = p->flags; 25150 26188 ARM_CLEAR_FEATURE (tmp, tmp, p->flags); 25151 26189 } … … 25160 26198 and implicit cases. Avoid taking this path for -march=all by 25161 26199 checking for contradictory v7-A / v7-M features. */ 25162 if (arch == 1026200 if (arch == TAG_CPU_ARCH_V7 25163 26201 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a) 25164 26202 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m) 25165 26203 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp)) 25166 arch = 13; 26204 { 26205 arch = TAG_CPU_ARCH_V7E_M; 26206 arm_arch = (arm_feature_set) ARM_ARCH_V7EM; 26207 } 26208 26209 ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base); 26210 if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any)) 26211 { 26212 arch = TAG_CPU_ARCH_V8M_MAIN; 26213 arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN; 26214 } 26215 26216 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as 26217 coming from ARMv8-A. However, since ARMv8-A has more instructions than 26218 ARMv8-M, -march=all must be detected as ARMv8-A. */ 26219 if (arch == TAG_CPU_ARCH_V8M_MAIN 26220 && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any)) 26221 { 26222 arch = TAG_CPU_ARCH_V8; 26223 arm_arch = (arm_feature_set) ARM_ARCH_V8A; 26224 } 25167 26225 25168 26226 /* Tag_CPU_name. */ … … 25187 26245 25188 26246 /* Tag_CPU_arch_profile. */ 25189 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)) 26247 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a) 26248 || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8) 26249 || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics) 26250 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))) 25190 26251 profile = 'A'; 25191 26252 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r)) … … 25199 26260 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile); 25200 26261 26262 /* Tag_DSP_extension. */ 26263 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp)) 26264 { 26265 arm_feature_set ext; 26266 26267 /* DSP instructions not in architecture. */ 26268 ARM_CLEAR_FEATURE (ext, flags, arm_arch); 26269 if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp)) 26270 aeabi_set_attribute_int (Tag_DSP_extension, 1); 26271 } 26272 25201 26273 /* Tag_ARM_ISA_use. */ 25202 26274 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1) … … 25207 26279 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t) 25208 26280 || arch == 0) 25209 aeabi_set_attribute_int (Tag_THUMB_ISA_use, 25210 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 
2 : 1); 26281 { 26282 int thumb_isa_use; 26283 26284 if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8) 26285 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)) 26286 thumb_isa_use = 3; 26287 else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2)) 26288 thumb_isa_use = 2; 26289 else 26290 thumb_isa_use = 1; 26291 aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use); 26292 } 25211 26293 25212 26294 /* Tag_VFP_arch. */ 25213 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8)) 25214 aeabi_set_attribute_int (Tag_VFP_arch, 7); 26295 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd)) 26296 aeabi_set_attribute_int (Tag_VFP_arch, 26297 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32) 26298 ? 7 : 8); 25215 26299 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma)) 25216 26300 aeabi_set_attribute_int (Tag_VFP_arch, … … 25245 26329 25246 26330 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */ 25247 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8)) 26331 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1)) 26332 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4); 26333 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8)) 25248 26334 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3); 25249 26335 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1)) … … 25270 26356 but we have no architecture profile set, nor have we any ARM instructions. 25271 26357 25272 For ARMv8 we set the tag to 0 as integer divide is implied by the base25273 architecture.26358 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied 26359 by the base architecture. 25274 26360 25275 26361 For new architectures we will have to check these tests. */ 25276 gas_assert (arch <= TAG_CPU_ARCH_V8); 25277 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)) 26362 gas_assert (arch <= TAG_CPU_ARCH_V8 26363 || (arch >= TAG_CPU_ARCH_V8M_BASE 26364 && arch <= TAG_CPU_ARCH_V8M_MAIN)); 26365 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8) 26366 || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)) 25278 26367 aeabi_set_attribute_int (Tag_DIV_use, 0); 25279 26368 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv) … … 25420 26509 { 25421 26510 const struct arm_option_extension_value_table *opt; 26511 const arm_feature_set arm_any = ARM_ANY; 25422 26512 char saved_char; 25423 26513 char *name; … … 25440 26530 if (streq (opt->name, name)) 25441 26531 { 25442 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs)) 26532 int i, nb_allowed_archs = 26533 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]); 26534 for (i = 0; i < nb_allowed_archs; i++) 26535 { 26536 /* Empty entry. */ 26537 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any)) 26538 continue; 26539 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt)) 26540 break; 26541 } 26542 26543 if (i == nb_allowed_archs) 25443 26544 { 25444 26545 as_bad (_("architectural extension `%s' is not allowed for the " … … 25448 26549 25449 26550 if (adding_value) 25450 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value); 26551 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, 26552 opt->merge_value); 25451 26553 else 25452 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt-> value);26554 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value); 25453 26555 25454 26556 mcpu_cpu_opt = &selected_cpu; … … 25564 26666 T (Tag_T2EE_use), 25565 26667 T (Tag_Virtualization_use), 26668 T (Tag_DSP_extension), 25566 26669 /* We deliberately do not include Tag_MPextension_use_legacy. 
*/ 25567 26670 #undef T … … 25580 26683 25581 26684 25582 /* Apply sym value for relocations only in the case that 25583 they are for local symbols and you have the respective25584 architectural feature for blx and simple switches. */26685 /* Apply sym value for relocations only in the case that they are for 26686 local symbols in the same segment as the fixup and you have the 26687 respective architectural feature for blx and simple switches. */ 25585 26688 int 25586 arm_apply_sym_value (struct fix * fixP )26689 arm_apply_sym_value (struct fix * fixP, segT this_seg) 25587 26690 { 25588 26691 if (fixP->fx_addsy 25589 26692 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t) 26693 /* PR 17444: If the local symbol is in a different section then a reloc 26694 will always be generated for it, so applying the symbol value now 26695 will result in a double offset being stored in the relocation. */ 26696 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg) 25590 26697 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)) 25591 26698 { … … 25601 26708 case BFD_RELOC_THUMB_PCREL_BLX: 25602 26709 if (THUMB_IS_FUNC (fixP->fx_addsy)) 25603 26710 return 1; 25604 26711 break; 25605 26712
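A note on the reworked extension handling above: each arm_extensions[] entry now carries separate merge/clear feature masks plus a two-slot allowed_archs[] list, and arm_parse_extension / s_arm_arch_extension accept an extension only when some non-empty slot is a feature subset of the selected CPU. The standalone C sketch below models that check with simplified stand-ins; feat_set, FEAT_*, feat_subset_p and extension_applies_p are illustrative names, not the real arm_feature_set / ARM_FSET_CPU_SUBSET / ARM_FEATURE_EQUAL definitions from include/opcode/arm.h, and the bit assignments are invented for the example.

    /* Illustrative sketch only -- simplified stand-ins for the binutils
       feature-set machinery; none of these names are the real definitions.  */

    #include <stdio.h>

    typedef struct
    {
      unsigned long core_low;   /* first core feature word    */
      unsigned long core_high;  /* second core feature word   */
      unsigned long coproc;     /* coprocessor feature word   */
    } feat_set;

    #define FEAT_ANY   { ~0UL, ~0UL, ~0UL } /* marks an unused allowed_archs slot */
    #define FEAT_NONE  { 0UL, 0UL, 0UL }    /* subset of anything: "any arch"     */
    #define FEAT_V7A   { 1UL << 0, 0UL, 0UL }
    #define FEAT_V7R   { 1UL << 1, 0UL, 0UL }
    #define FEAT_IDIV  { 1UL << 2, 0UL, 0UL }

    /* Rough analogue of ARM_FSET_CPU_SUBSET: every bit of SUB is set in SUPER.  */
    static int
    feat_subset_p (const feat_set *sub, const feat_set *super)
    {
      return (sub->core_low & ~super->core_low) == 0
             && (sub->core_high & ~super->core_high) == 0
             && (sub->coproc & ~super->coproc) == 0;
    }

    /* Rough analogue of ARM_FEATURE_EQUAL.  */
    static int
    feat_equal_p (const feat_set *a, const feat_set *b)
    {
      return a->core_low == b->core_low
             && a->core_high == b->core_high
             && a->coproc == b->coproc;
    }

    typedef struct
    {
      const char *name;
      feat_set merge_value;      /* bits added by "+<name>"      */
      feat_set clear_value;      /* bits removed by "+no<name>"  */
      feat_set allowed_archs[2]; /* FEAT_ANY marks an empty slot */
    } ext_entry;

    /* Mirrors the scan added to arm_parse_extension: the extension applies
       only if some non-empty allowed_archs slot is a subset of the CPU.  */
    static int
    extension_applies_p (const ext_entry *opt, const feat_set *cpu)
    {
      static const feat_set any = FEAT_ANY;
      int i, nb = sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);

      for (i = 0; i < nb; i++)
        {
          if (feat_equal_p (&opt->allowed_archs[i], &any))
            continue;           /* empty slot */
          if (feat_subset_p (&opt->allowed_archs[i], cpu))
            return 1;
        }
      return 0;
    }

    int
    main (void)
    {
      /* Shape of the new two-slot entries, e.g. "idiv" on v7-A or v7-R.  */
      static const ext_entry idiv =
        { "idiv", FEAT_IDIV, FEAT_IDIV, { FEAT_V7A, FEAT_V7R } };

      feat_set cortex_r = FEAT_V7R;  /* pretend CPU carrying only the v7-R bit */
      feat_set arm926   = FEAT_NONE; /* pretend CPU carrying neither bit       */

      printf ("+idiv on a v7-R CPU:   %s\n",
              extension_applies_p (&idiv, &cortex_r) ? "accepted" : "rejected");
      printf ("+idiv on a pre-v7 CPU: %s\n",
              extension_applies_p (&idiv, &arm926) ? "accepted" : "rejected");
      return 0;
    }

Splitting the old single value field into merge_value and clear_value lets "+no<ext>" remove only the bits the extension itself owns; in the table above, for instance, "virt" merges ARM_EXT_VIRT together with the divide bits but clears only ARM_EXT_VIRT.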